Merge tag 'topic/synopsys-media-formats-2017-04-03' of git://anongit.freedesktop...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / amdgpu / ci_dpm.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include "drmP.h"
26 #include "amdgpu.h"
27 #include "amdgpu_pm.h"
28 #include "amdgpu_ucode.h"
29 #include "cikd.h"
30 #include "amdgpu_dpm.h"
31 #include "ci_dpm.h"
32 #include "gfx_v7_0.h"
33 #include "atom.h"
34 #include "amd_pcie.h"
35 #include <linux/seq_file.h>
36
37 #include "smu/smu_7_0_1_d.h"
38 #include "smu/smu_7_0_1_sh_mask.h"
39
40 #include "dce/dce_8_0_d.h"
41 #include "dce/dce_8_0_sh_mask.h"
42
43 #include "bif/bif_4_1_d.h"
44 #include "bif/bif_4_1_sh_mask.h"
45
46 #include "gca/gfx_7_2_d.h"
47 #include "gca/gfx_7_2_sh_mask.h"
48
49 #include "gmc/gmc_7_1_d.h"
50 #include "gmc/gmc_7_1_sh_mask.h"
51
52 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
53 MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
54 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
55 MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
56
57 #define MC_CG_ARB_FREQ_F0           0x0a
58 #define MC_CG_ARB_FREQ_F1           0x0b
59 #define MC_CG_ARB_FREQ_F2           0x0c
60 #define MC_CG_ARB_FREQ_F3           0x0d
61
62 #define SMC_RAM_END 0x40000
63
64 #define VOLTAGE_SCALE               4
65 #define VOLTAGE_VID_OFFSET_SCALE1    625
66 #define VOLTAGE_VID_OFFSET_SCALE2    100
67
68 static const struct ci_pt_defaults defaults_hawaii_xt =
69 {
70         1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
71         { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
72         { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
73 };
74
75 static const struct ci_pt_defaults defaults_hawaii_pro =
76 {
77         1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
78         { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
79         { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
80 };
81
82 static const struct ci_pt_defaults defaults_bonaire_xt =
83 {
84         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
85         { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
86         { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
87 };
88
89 #if 0
90 static const struct ci_pt_defaults defaults_bonaire_pro =
91 {
92         1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
93         { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
94         { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
95 };
96 #endif
97
98 static const struct ci_pt_defaults defaults_saturn_xt =
99 {
100         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
101         { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
102         { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
103 };
104
105 #if 0
106 static const struct ci_pt_defaults defaults_saturn_pro =
107 {
108         1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
109         { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
110         { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
111 };
112 #endif
113
114 static const struct ci_pt_config_reg didt_config_ci[] =
115 {
116         { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117         { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118         { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119         { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120         { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121         { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
122         { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
123         { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
124         { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
125         { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
126         { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127         { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128         { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
129         { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
130         { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
131         { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
132         { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
133         { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134         { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135         { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136         { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137         { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138         { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139         { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
140         { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
141         { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
142         { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
143         { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
144         { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145         { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146         { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
147         { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
148         { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
149         { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
150         { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
151         { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152         { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153         { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154         { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155         { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156         { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
157         { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
158         { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
159         { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
160         { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
161         { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
162         { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
163         { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
164         { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
165         { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
166         { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
167         { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
168         { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
169         { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
170         { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
171         { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
172         { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
173         { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
174         { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
175         { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
176         { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
177         { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
178         { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
179         { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
180         { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
181         { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
182         { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
183         { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
184         { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
185         { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
186         { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
187         { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
188         { 0xFFFFFFFF }
189 };
190
191 static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
192 {
193         return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
194 }
195
196 #define MC_CG_ARB_FREQ_F0           0x0a
197 #define MC_CG_ARB_FREQ_F1           0x0b
198 #define MC_CG_ARB_FREQ_F2           0x0c
199 #define MC_CG_ARB_FREQ_F3           0x0d
200
201 static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
202                                        u32 arb_freq_src, u32 arb_freq_dest)
203 {
204         u32 mc_arb_dram_timing;
205         u32 mc_arb_dram_timing2;
206         u32 burst_time;
207         u32 mc_cg_config;
208
209         switch (arb_freq_src) {
210         case MC_CG_ARB_FREQ_F0:
211                 mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
212                 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
213                 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
214                          MC_ARB_BURST_TIME__STATE0__SHIFT;
215                 break;
216         case MC_CG_ARB_FREQ_F1:
217                 mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
218                 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
219                 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
220                          MC_ARB_BURST_TIME__STATE1__SHIFT;
221                 break;
222         default:
223                 return -EINVAL;
224         }
225
226         switch (arb_freq_dest) {
227         case MC_CG_ARB_FREQ_F0:
228                 WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
229                 WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
230                 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
231                         ~MC_ARB_BURST_TIME__STATE0_MASK);
232                 break;
233         case MC_CG_ARB_FREQ_F1:
234                 WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
235                 WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
236                 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
237                         ~MC_ARB_BURST_TIME__STATE1_MASK);
238                 break;
239         default:
240                 return -EINVAL;
241         }
242
243         mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
244         WREG32(mmMC_CG_CONFIG, mc_cg_config);
245         WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
246                 ~MC_ARB_CG__CG_ARB_REQ_MASK);
247
248         return 0;
249 }
250
251 static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
252 {
253         u8 mc_para_index;
254
255         if (memory_clock < 10000)
256                 mc_para_index = 0;
257         else if (memory_clock >= 80000)
258                 mc_para_index = 0x0f;
259         else
260                 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
261         return mc_para_index;
262 }
263
264 static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
265 {
266         u8 mc_para_index;
267
268         if (strobe_mode) {
269                 if (memory_clock < 12500)
270                         mc_para_index = 0x00;
271                 else if (memory_clock > 47500)
272                         mc_para_index = 0x0f;
273                 else
274                         mc_para_index = (u8)((memory_clock - 10000) / 2500);
275         } else {
276                 if (memory_clock < 65000)
277                         mc_para_index = 0x00;
278                 else if (memory_clock > 135000)
279                         mc_para_index = 0x0f;
280                 else
281                         mc_para_index = (u8)((memory_clock - 60000) / 5000);
282         }
283         return mc_para_index;
284 }
285
286 static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
287                                                      u32 max_voltage_steps,
288                                                      struct atom_voltage_table *voltage_table)
289 {
290         unsigned int i, diff;
291
292         if (voltage_table->count <= max_voltage_steps)
293                 return;
294
295         diff = voltage_table->count - max_voltage_steps;
296
297         for (i = 0; i < max_voltage_steps; i++)
298                 voltage_table->entries[i] = voltage_table->entries[i + diff];
299
300         voltage_table->count = max_voltage_steps;
301 }
302
303 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
304                                          struct atom_voltage_table_entry *voltage_table,
305                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
306 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
307 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
308                                        u32 target_tdp);
309 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
310 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
311 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
312
313 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
314                                                              PPSMC_Msg msg, u32 parameter);
315 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
316 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
317
318 static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
319 {
320         struct ci_power_info *pi = adev->pm.dpm.priv;
321
322         return pi;
323 }
324
325 static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
326 {
327         struct ci_ps *ps = rps->ps_priv;
328
329         return ps;
330 }
331
332 static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
333 {
334         struct ci_power_info *pi = ci_get_pi(adev);
335
336         switch (adev->pdev->device) {
337         case 0x6649:
338         case 0x6650:
339         case 0x6651:
340         case 0x6658:
341         case 0x665C:
342         case 0x665D:
343         default:
344                 pi->powertune_defaults = &defaults_bonaire_xt;
345                 break;
346         case 0x6640:
347         case 0x6641:
348         case 0x6646:
349         case 0x6647:
350                 pi->powertune_defaults = &defaults_saturn_xt;
351                 break;
352         case 0x67B8:
353         case 0x67B0:
354                 pi->powertune_defaults = &defaults_hawaii_xt;
355                 break;
356         case 0x67BA:
357         case 0x67B1:
358                 pi->powertune_defaults = &defaults_hawaii_pro;
359                 break;
360         case 0x67A0:
361         case 0x67A1:
362         case 0x67A2:
363         case 0x67A8:
364         case 0x67A9:
365         case 0x67AA:
366         case 0x67B9:
367         case 0x67BE:
368                 pi->powertune_defaults = &defaults_bonaire_xt;
369                 break;
370         }
371
372         pi->dte_tj_offset = 0;
373
374         pi->caps_power_containment = true;
375         pi->caps_cac = false;
376         pi->caps_sq_ramping = false;
377         pi->caps_db_ramping = false;
378         pi->caps_td_ramping = false;
379         pi->caps_tcp_ramping = false;
380
381         if (pi->caps_power_containment) {
382                 pi->caps_cac = true;
383                 if (adev->asic_type == CHIP_HAWAII)
384                         pi->enable_bapm_feature = false;
385                 else
386                         pi->enable_bapm_feature = true;
387                 pi->enable_tdc_limit_feature = true;
388                 pi->enable_pkg_pwr_tracking_feature = true;
389         }
390 }
391
392 static u8 ci_convert_to_vid(u16 vddc)
393 {
394         return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
395 }
396
397 static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
398 {
399         struct ci_power_info *pi = ci_get_pi(adev);
400         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
401         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
402         u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
403         u32 i;
404
405         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
406                 return -EINVAL;
407         if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
408                 return -EINVAL;
409         if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
410             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
411                 return -EINVAL;
412
413         for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
414                 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
415                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
416                         hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
417                         hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
418                 } else {
419                         lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
420                         hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
421                 }
422         }
423         return 0;
424 }
425
426 static int ci_populate_vddc_vid(struct amdgpu_device *adev)
427 {
428         struct ci_power_info *pi = ci_get_pi(adev);
429         u8 *vid = pi->smc_powertune_table.VddCVid;
430         u32 i;
431
432         if (pi->vddc_voltage_table.count > 8)
433                 return -EINVAL;
434
435         for (i = 0; i < pi->vddc_voltage_table.count; i++)
436                 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
437
438         return 0;
439 }
440
441 static int ci_populate_svi_load_line(struct amdgpu_device *adev)
442 {
443         struct ci_power_info *pi = ci_get_pi(adev);
444         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
445
446         pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
447         pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
448         pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
449         pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
450
451         return 0;
452 }
453
454 static int ci_populate_tdc_limit(struct amdgpu_device *adev)
455 {
456         struct ci_power_info *pi = ci_get_pi(adev);
457         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
458         u16 tdc_limit;
459
460         tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
461         pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
462         pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
463                 pt_defaults->tdc_vddc_throttle_release_limit_perc;
464         pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
465
466         return 0;
467 }
468
469 static int ci_populate_dw8(struct amdgpu_device *adev)
470 {
471         struct ci_power_info *pi = ci_get_pi(adev);
472         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
473         int ret;
474
475         ret = amdgpu_ci_read_smc_sram_dword(adev,
476                                      SMU7_FIRMWARE_HEADER_LOCATION +
477                                      offsetof(SMU7_Firmware_Header, PmFuseTable) +
478                                      offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
479                                      (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
480                                      pi->sram_end);
481         if (ret)
482                 return -EINVAL;
483         else
484                 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
485
486         return 0;
487 }
488
489 static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
490 {
491         struct ci_power_info *pi = ci_get_pi(adev);
492
493         if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
494             (adev->pm.dpm.fan.fan_output_sensitivity == 0))
495                 adev->pm.dpm.fan.fan_output_sensitivity =
496                         adev->pm.dpm.fan.default_fan_output_sensitivity;
497
498         pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
499                 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
500
501         return 0;
502 }
503
504 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
505 {
506         struct ci_power_info *pi = ci_get_pi(adev);
507         u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
508         u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
509         int i, min, max;
510
511         min = max = hi_vid[0];
512         for (i = 0; i < 8; i++) {
513                 if (0 != hi_vid[i]) {
514                         if (min > hi_vid[i])
515                                 min = hi_vid[i];
516                         if (max < hi_vid[i])
517                                 max = hi_vid[i];
518                 }
519
520                 if (0 != lo_vid[i]) {
521                         if (min > lo_vid[i])
522                                 min = lo_vid[i];
523                         if (max < lo_vid[i])
524                                 max = lo_vid[i];
525                 }
526         }
527
528         if ((min == 0) || (max == 0))
529                 return -EINVAL;
530         pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
531         pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
532
533         return 0;
534 }
535
536 static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
537 {
538         struct ci_power_info *pi = ci_get_pi(adev);
539         u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
540         u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
541         struct amdgpu_cac_tdp_table *cac_tdp_table =
542                 adev->pm.dpm.dyn_state.cac_tdp_table;
543
544         hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
545         lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
546
547         pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
548         pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
549
550         return 0;
551 }
552
553 static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
554 {
555         struct ci_power_info *pi = ci_get_pi(adev);
556         const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
557         SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
558         struct amdgpu_cac_tdp_table *cac_tdp_table =
559                 adev->pm.dpm.dyn_state.cac_tdp_table;
560         struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
561         int i, j, k;
562         const u16 *def1;
563         const u16 *def2;
564
565         dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
566         dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
567
568         dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
569         dpm_table->GpuTjMax =
570                 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
571         dpm_table->GpuTjHyst = 8;
572
573         dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
574
575         if (ppm) {
576                 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
577                 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
578         } else {
579                 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
580                 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
581         }
582
583         dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
584         def1 = pt_defaults->bapmti_r;
585         def2 = pt_defaults->bapmti_rc;
586
587         for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
588                 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
589                         for (k = 0; k < SMU7_DTE_SINKS; k++) {
590                                 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
591                                 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
592                                 def1++;
593                                 def2++;
594                         }
595                 }
596         }
597
598         return 0;
599 }
600
601 static int ci_populate_pm_base(struct amdgpu_device *adev)
602 {
603         struct ci_power_info *pi = ci_get_pi(adev);
604         u32 pm_fuse_table_offset;
605         int ret;
606
607         if (pi->caps_power_containment) {
608                 ret = amdgpu_ci_read_smc_sram_dword(adev,
609                                              SMU7_FIRMWARE_HEADER_LOCATION +
610                                              offsetof(SMU7_Firmware_Header, PmFuseTable),
611                                              &pm_fuse_table_offset, pi->sram_end);
612                 if (ret)
613                         return ret;
614                 ret = ci_populate_bapm_vddc_vid_sidd(adev);
615                 if (ret)
616                         return ret;
617                 ret = ci_populate_vddc_vid(adev);
618                 if (ret)
619                         return ret;
620                 ret = ci_populate_svi_load_line(adev);
621                 if (ret)
622                         return ret;
623                 ret = ci_populate_tdc_limit(adev);
624                 if (ret)
625                         return ret;
626                 ret = ci_populate_dw8(adev);
627                 if (ret)
628                         return ret;
629                 ret = ci_populate_fuzzy_fan(adev);
630                 if (ret)
631                         return ret;
632                 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
633                 if (ret)
634                         return ret;
635                 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
636                 if (ret)
637                         return ret;
638                 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
639                                            (u8 *)&pi->smc_powertune_table,
640                                            sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
641                 if (ret)
642                         return ret;
643         }
644
645         return 0;
646 }
647
648 static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
649 {
650         struct ci_power_info *pi = ci_get_pi(adev);
651         u32 data;
652
653         if (pi->caps_sq_ramping) {
654                 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
655                 if (enable)
656                         data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
657                 else
658                         data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
659                 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
660         }
661
662         if (pi->caps_db_ramping) {
663                 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
664                 if (enable)
665                         data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
666                 else
667                         data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
668                 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
669         }
670
671         if (pi->caps_td_ramping) {
672                 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
673                 if (enable)
674                         data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
675                 else
676                         data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
677                 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
678         }
679
680         if (pi->caps_tcp_ramping) {
681                 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
682                 if (enable)
683                         data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
684                 else
685                         data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
686                 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
687         }
688 }
689
690 static int ci_program_pt_config_registers(struct amdgpu_device *adev,
691                                           const struct ci_pt_config_reg *cac_config_regs)
692 {
693         const struct ci_pt_config_reg *config_regs = cac_config_regs;
694         u32 data;
695         u32 cache = 0;
696
697         if (config_regs == NULL)
698                 return -EINVAL;
699
700         while (config_regs->offset != 0xFFFFFFFF) {
701                 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
702                         cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
703                 } else {
704                         switch (config_regs->type) {
705                         case CISLANDS_CONFIGREG_SMC_IND:
706                                 data = RREG32_SMC(config_regs->offset);
707                                 break;
708                         case CISLANDS_CONFIGREG_DIDT_IND:
709                                 data = RREG32_DIDT(config_regs->offset);
710                                 break;
711                         default:
712                                 data = RREG32(config_regs->offset);
713                                 break;
714                         }
715
716                         data &= ~config_regs->mask;
717                         data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
718                         data |= cache;
719
720                         switch (config_regs->type) {
721                         case CISLANDS_CONFIGREG_SMC_IND:
722                                 WREG32_SMC(config_regs->offset, data);
723                                 break;
724                         case CISLANDS_CONFIGREG_DIDT_IND:
725                                 WREG32_DIDT(config_regs->offset, data);
726                                 break;
727                         default:
728                                 WREG32(config_regs->offset, data);
729                                 break;
730                         }
731                         cache = 0;
732                 }
733                 config_regs++;
734         }
735         return 0;
736 }
737
738 static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
739 {
740         struct ci_power_info *pi = ci_get_pi(adev);
741         int ret;
742
743         if (pi->caps_sq_ramping || pi->caps_db_ramping ||
744             pi->caps_td_ramping || pi->caps_tcp_ramping) {
745                 adev->gfx.rlc.funcs->enter_safe_mode(adev);
746
747                 if (enable) {
748                         ret = ci_program_pt_config_registers(adev, didt_config_ci);
749                         if (ret) {
750                                 adev->gfx.rlc.funcs->exit_safe_mode(adev);
751                                 return ret;
752                         }
753                 }
754
755                 ci_do_enable_didt(adev, enable);
756
757                 adev->gfx.rlc.funcs->exit_safe_mode(adev);
758         }
759
760         return 0;
761 }
762
763 static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
764 {
765         struct ci_power_info *pi = ci_get_pi(adev);
766         PPSMC_Result smc_result;
767         int ret = 0;
768
769         if (enable) {
770                 pi->power_containment_features = 0;
771                 if (pi->caps_power_containment) {
772                         if (pi->enable_bapm_feature) {
773                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
774                                 if (smc_result != PPSMC_Result_OK)
775                                         ret = -EINVAL;
776                                 else
777                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
778                         }
779
780                         if (pi->enable_tdc_limit_feature) {
781                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
782                                 if (smc_result != PPSMC_Result_OK)
783                                         ret = -EINVAL;
784                                 else
785                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
786                         }
787
788                         if (pi->enable_pkg_pwr_tracking_feature) {
789                                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
790                                 if (smc_result != PPSMC_Result_OK) {
791                                         ret = -EINVAL;
792                                 } else {
793                                         struct amdgpu_cac_tdp_table *cac_tdp_table =
794                                                 adev->pm.dpm.dyn_state.cac_tdp_table;
795                                         u32 default_pwr_limit =
796                                                 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
797
798                                         pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
799
800                                         ci_set_power_limit(adev, default_pwr_limit);
801                                 }
802                         }
803                 }
804         } else {
805                 if (pi->caps_power_containment && pi->power_containment_features) {
806                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
807                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
808
809                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
810                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
811
812                         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
813                                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
814                         pi->power_containment_features = 0;
815                 }
816         }
817
818         return ret;
819 }
820
821 static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
822 {
823         struct ci_power_info *pi = ci_get_pi(adev);
824         PPSMC_Result smc_result;
825         int ret = 0;
826
827         if (pi->caps_cac) {
828                 if (enable) {
829                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
830                         if (smc_result != PPSMC_Result_OK) {
831                                 ret = -EINVAL;
832                                 pi->cac_enabled = false;
833                         } else {
834                                 pi->cac_enabled = true;
835                         }
836                 } else if (pi->cac_enabled) {
837                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
838                         pi->cac_enabled = false;
839                 }
840         }
841
842         return ret;
843 }
844
845 static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
846                                             bool enable)
847 {
848         struct ci_power_info *pi = ci_get_pi(adev);
849         PPSMC_Result smc_result = PPSMC_Result_OK;
850
851         if (pi->thermal_sclk_dpm_enabled) {
852                 if (enable)
853                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
854                 else
855                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
856         }
857
858         if (smc_result == PPSMC_Result_OK)
859                 return 0;
860         else
861                 return -EINVAL;
862 }
863
864 static int ci_power_control_set_level(struct amdgpu_device *adev)
865 {
866         struct ci_power_info *pi = ci_get_pi(adev);
867         struct amdgpu_cac_tdp_table *cac_tdp_table =
868                 adev->pm.dpm.dyn_state.cac_tdp_table;
869         s32 adjust_percent;
870         s32 target_tdp;
871         int ret = 0;
872         bool adjust_polarity = false; /* ??? */
873
874         if (pi->caps_power_containment) {
875                 adjust_percent = adjust_polarity ?
876                         adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
877                 target_tdp = ((100 + adjust_percent) *
878                               (s32)cac_tdp_table->configurable_tdp) / 100;
879
880                 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
881         }
882
883         return ret;
884 }
885
886 static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
887 {
888         struct ci_power_info *pi = ci_get_pi(adev);
889
890         pi->uvd_power_gated = gate;
891
892         if (gate) {
893                 /* stop the UVD block */
894                 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
895                                                         AMD_PG_STATE_GATE);
896                 ci_update_uvd_dpm(adev, gate);
897         } else {
898                 amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
899                                                         AMD_PG_STATE_UNGATE);
900                 ci_update_uvd_dpm(adev, gate);
901         }
902 }
903
904 static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
905 {
906         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
907         u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
908
909         if (vblank_time < switch_limit)
910                 return true;
911         else
912                 return false;
913
914 }
915
916 static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
917                                         struct amdgpu_ps *rps)
918 {
919         struct ci_ps *ps = ci_get_ps(rps);
920         struct ci_power_info *pi = ci_get_pi(adev);
921         struct amdgpu_clock_and_voltage_limits *max_limits;
922         bool disable_mclk_switching;
923         u32 sclk, mclk;
924         int i;
925
926         if (rps->vce_active) {
927                 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
928                 rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
929         } else {
930                 rps->evclk = 0;
931                 rps->ecclk = 0;
932         }
933
934         if ((adev->pm.dpm.new_active_crtc_count > 1) ||
935             ci_dpm_vblank_too_short(adev))
936                 disable_mclk_switching = true;
937         else
938                 disable_mclk_switching = false;
939
940         if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
941                 pi->battery_state = true;
942         else
943                 pi->battery_state = false;
944
945         if (adev->pm.dpm.ac_power)
946                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
947         else
948                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
949
950         if (adev->pm.dpm.ac_power == false) {
951                 for (i = 0; i < ps->performance_level_count; i++) {
952                         if (ps->performance_levels[i].mclk > max_limits->mclk)
953                                 ps->performance_levels[i].mclk = max_limits->mclk;
954                         if (ps->performance_levels[i].sclk > max_limits->sclk)
955                                 ps->performance_levels[i].sclk = max_limits->sclk;
956                 }
957         }
958
959         /* XXX validate the min clocks required for display */
960
961         if (disable_mclk_switching) {
962                 mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
963                 sclk = ps->performance_levels[0].sclk;
964         } else {
965                 mclk = ps->performance_levels[0].mclk;
966                 sclk = ps->performance_levels[0].sclk;
967         }
968
969         if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
970                 sclk = adev->pm.pm_display_cfg.min_core_set_clock;
971
972         if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
973                 mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
974
975         if (rps->vce_active) {
976                 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
977                         sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
978                 if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
979                         mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
980         }
981
982         ps->performance_levels[0].sclk = sclk;
983         ps->performance_levels[0].mclk = mclk;
984
985         if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
986                 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
987
988         if (disable_mclk_switching) {
989                 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
990                         ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
991         } else {
992                 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
993                         ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
994         }
995 }
996
997 static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
998                                             int min_temp, int max_temp)
999 {
1000         int low_temp = 0 * 1000;
1001         int high_temp = 255 * 1000;
1002         u32 tmp;
1003
1004         if (low_temp < min_temp)
1005                 low_temp = min_temp;
1006         if (high_temp > max_temp)
1007                 high_temp = max_temp;
1008         if (high_temp < low_temp) {
1009                 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1010                 return -EINVAL;
1011         }
1012
1013         tmp = RREG32_SMC(ixCG_THERMAL_INT);
1014         tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
1015         tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
1016                 ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
1017         WREG32_SMC(ixCG_THERMAL_INT, tmp);
1018
1019 #if 0
1020         /* XXX: need to figure out how to handle this properly */
1021         tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1022         tmp &= DIG_THERM_DPM_MASK;
1023         tmp |= DIG_THERM_DPM(high_temp / 1000);
1024         WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1025 #endif
1026
1027         adev->pm.dpm.thermal.min_temp = low_temp;
1028         adev->pm.dpm.thermal.max_temp = high_temp;
1029         return 0;
1030 }
1031
1032 static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1033                                    bool enable)
1034 {
1035         u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1036         PPSMC_Result result;
1037
1038         if (enable) {
1039                 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1040                                  CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1041                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1042                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1043                 if (result != PPSMC_Result_OK) {
1044                         DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1045                         return -EINVAL;
1046                 }
1047         } else {
1048                 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1049                         CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1050                 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1051                 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1052                 if (result != PPSMC_Result_OK) {
1053                         DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1054                         return -EINVAL;
1055                 }
1056         }
1057
1058         return 0;
1059 }
1060
1061 static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
1062 {
1063         struct ci_power_info *pi = ci_get_pi(adev);
1064         u32 tmp;
1065
1066         if (pi->fan_ctrl_is_in_default_mode) {
1067                 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
1068                         >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1069                 pi->fan_ctrl_default_mode = tmp;
1070                 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
1071                         >> CG_FDO_CTRL2__TMIN__SHIFT;
1072                 pi->t_min = tmp;
1073                 pi->fan_ctrl_is_in_default_mode = false;
1074         }
1075
1076         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1077         tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
1078         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1079
1080         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1081         tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1082         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1083 }
1084
1085 static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
1086 {
1087         struct ci_power_info *pi = ci_get_pi(adev);
1088         SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1089         u32 duty100;
1090         u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1091         u16 fdo_min, slope1, slope2;
1092         u32 reference_clock, tmp;
1093         int ret;
1094         u64 tmp64;
1095
1096         if (!pi->fan_table_start) {
1097                 adev->pm.dpm.fan.ucode_fan_control = false;
1098                 return 0;
1099         }
1100
1101         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1102                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1103
1104         if (duty100 == 0) {
1105                 adev->pm.dpm.fan.ucode_fan_control = false;
1106                 return 0;
1107         }
1108
1109         tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
1110         do_div(tmp64, 10000);
1111         fdo_min = (u16)tmp64;
1112
1113         t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
1114         t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
1115
1116         pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
1117         pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
1118
1119         slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1120         slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1121
1122         fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
1123         fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
1124         fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
1125
1126         fan_table.Slope1 = cpu_to_be16(slope1);
1127         fan_table.Slope2 = cpu_to_be16(slope2);
1128
1129         fan_table.FdoMin = cpu_to_be16(fdo_min);
1130
1131         fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
1132
1133         fan_table.HystUp = cpu_to_be16(1);
1134
1135         fan_table.HystSlope = cpu_to_be16(1);
1136
1137         fan_table.TempRespLim = cpu_to_be16(5);
1138
1139         reference_clock = amdgpu_asic_get_xclk(adev);
1140
1141         fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
1142                                                reference_clock) / 1600);
1143
1144         fan_table.FdoMax = cpu_to_be16((u16)duty100);
1145
1146         tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
1147                 >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
1148         fan_table.TempSrc = (uint8_t)tmp;
1149
1150         ret = amdgpu_ci_copy_bytes_to_smc(adev,
1151                                           pi->fan_table_start,
1152                                           (u8 *)(&fan_table),
1153                                           sizeof(fan_table),
1154                                           pi->sram_end);
1155
1156         if (ret) {
1157                 DRM_ERROR("Failed to load fan table to the SMC.");
1158                 adev->pm.dpm.fan.ucode_fan_control = false;
1159         }
1160
1161         return 0;
1162 }
1163
1164 static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1165 {
1166         struct ci_power_info *pi = ci_get_pi(adev);
1167         PPSMC_Result ret;
1168
1169         if (pi->caps_od_fuzzy_fan_control_support) {
1170                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1171                                                                PPSMC_StartFanControl,
1172                                                                FAN_CONTROL_FUZZY);
1173                 if (ret != PPSMC_Result_OK)
1174                         return -EINVAL;
1175                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1176                                                                PPSMC_MSG_SetFanPwmMax,
1177                                                                adev->pm.dpm.fan.default_max_fan_pwm);
1178                 if (ret != PPSMC_Result_OK)
1179                         return -EINVAL;
1180         } else {
1181                 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1182                                                                PPSMC_StartFanControl,
1183                                                                FAN_CONTROL_TABLE);
1184                 if (ret != PPSMC_Result_OK)
1185                         return -EINVAL;
1186         }
1187
1188         pi->fan_is_controlled_by_smc = true;
1189         return 0;
1190 }
1191
1192
1193 static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1194 {
1195         PPSMC_Result ret;
1196         struct ci_power_info *pi = ci_get_pi(adev);
1197
1198         ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1199         if (ret == PPSMC_Result_OK) {
1200                 pi->fan_is_controlled_by_smc = false;
1201                 return 0;
1202         } else {
1203                 return -EINVAL;
1204         }
1205 }
1206
1207 static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
1208                                         u32 *speed)
1209 {
1210         u32 duty, duty100;
1211         u64 tmp64;
1212
1213         if (adev->pm.no_fan)
1214                 return -ENOENT;
1215
1216         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1217                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1218         duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1219                 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1220
1221         if (duty100 == 0)
1222                 return -EINVAL;
1223
1224         tmp64 = (u64)duty * 100;
1225         do_div(tmp64, duty100);
1226         *speed = (u32)tmp64;
1227
1228         if (*speed > 100)
1229                 *speed = 100;
1230
1231         return 0;
1232 }
1233
1234 static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
1235                                         u32 speed)
1236 {
1237         u32 tmp;
1238         u32 duty, duty100;
1239         u64 tmp64;
1240         struct ci_power_info *pi = ci_get_pi(adev);
1241
1242         if (adev->pm.no_fan)
1243                 return -ENOENT;
1244
1245         if (pi->fan_is_controlled_by_smc)
1246                 return -EINVAL;
1247
1248         if (speed > 100)
1249                 return -EINVAL;
1250
1251         duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1252                 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1253
1254         if (duty100 == 0)
1255                 return -EINVAL;
1256
1257         tmp64 = (u64)speed * duty100;
1258         do_div(tmp64, 100);
1259         duty = (u32)tmp64;
1260
1261         tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1262         tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1263         WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1264
1265         return 0;
1266 }
1267
1268 static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
1269 {
1270         if (mode) {
1271                 /* stop auto-manage */
1272                 if (adev->pm.dpm.fan.ucode_fan_control)
1273                         ci_fan_ctrl_stop_smc_fan_control(adev);
1274                 ci_fan_ctrl_set_static_mode(adev, mode);
1275         } else {
1276                 /* restart auto-manage */
1277                 if (adev->pm.dpm.fan.ucode_fan_control)
1278                         ci_thermal_start_smc_fan_control(adev);
1279                 else
1280                         ci_fan_ctrl_set_default_mode(adev);
1281         }
1282 }
1283
1284 static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
1285 {
1286         struct ci_power_info *pi = ci_get_pi(adev);
1287         u32 tmp;
1288
1289         if (pi->fan_is_controlled_by_smc)
1290                 return 0;
1291
1292         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1293         return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
1294 }
1295
1296 #if 0
1297 static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1298                                          u32 *speed)
1299 {
1300         u32 tach_period;
1301         u32 xclk = amdgpu_asic_get_xclk(adev);
1302
1303         if (adev->pm.no_fan)
1304                 return -ENOENT;
1305
1306         if (adev->pm.fan_pulses_per_revolution == 0)
1307                 return -ENOENT;
1308
1309         tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1310                 >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1311         if (tach_period == 0)
1312                 return -ENOENT;
1313
1314         *speed = 60 * xclk * 10000 / tach_period;
1315
1316         return 0;
1317 }
1318
1319 static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1320                                          u32 speed)
1321 {
1322         u32 tach_period, tmp;
1323         u32 xclk = amdgpu_asic_get_xclk(adev);
1324
1325         if (adev->pm.no_fan)
1326                 return -ENOENT;
1327
1328         if (adev->pm.fan_pulses_per_revolution == 0)
1329                 return -ENOENT;
1330
1331         if ((speed < adev->pm.fan_min_rpm) ||
1332             (speed > adev->pm.fan_max_rpm))
1333                 return -EINVAL;
1334
1335         if (adev->pm.dpm.fan.ucode_fan_control)
1336                 ci_fan_ctrl_stop_smc_fan_control(adev);
1337
1338         tach_period = 60 * xclk * 10000 / (8 * speed);
1339         tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1340         tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1341         WREG32_SMC(CG_TACH_CTRL, tmp);
1342
1343         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1344
1345         return 0;
1346 }
1347 #endif
1348
1349 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1350 {
1351         struct ci_power_info *pi = ci_get_pi(adev);
1352         u32 tmp;
1353
1354         if (!pi->fan_ctrl_is_in_default_mode) {
1355                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1356                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1357                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1358
1359                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1360                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1361                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1362                 pi->fan_ctrl_is_in_default_mode = true;
1363         }
1364 }
1365
1366 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1367 {
1368         if (adev->pm.dpm.fan.ucode_fan_control) {
1369                 ci_fan_ctrl_start_smc_fan_control(adev);
1370                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1371         }
1372 }
1373
1374 static void ci_thermal_initialize(struct amdgpu_device *adev)
1375 {
1376         u32 tmp;
1377
1378         if (adev->pm.fan_pulses_per_revolution) {
1379                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1380                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1381                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1382                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1383         }
1384
1385         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1386         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1387         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1388 }
1389
1390 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1391 {
1392         int ret;
1393
1394         ci_thermal_initialize(adev);
1395         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1396         if (ret)
1397                 return ret;
1398         ret = ci_thermal_enable_alert(adev, true);
1399         if (ret)
1400                 return ret;
1401         if (adev->pm.dpm.fan.ucode_fan_control) {
1402                 ret = ci_thermal_setup_fan_table(adev);
1403                 if (ret)
1404                         return ret;
1405                 ci_thermal_start_smc_fan_control(adev);
1406         }
1407
1408         return 0;
1409 }
1410
1411 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1412 {
1413         if (!adev->pm.no_fan)
1414                 ci_fan_ctrl_set_default_mode(adev);
1415 }
1416
1417 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1418                                      u16 reg_offset, u32 *value)
1419 {
1420         struct ci_power_info *pi = ci_get_pi(adev);
1421
1422         return amdgpu_ci_read_smc_sram_dword(adev,
1423                                       pi->soft_regs_start + reg_offset,
1424                                       value, pi->sram_end);
1425 }
1426
1427 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1428                                       u16 reg_offset, u32 value)
1429 {
1430         struct ci_power_info *pi = ci_get_pi(adev);
1431
1432         return amdgpu_ci_write_smc_sram_dword(adev,
1433                                        pi->soft_regs_start + reg_offset,
1434                                        value, pi->sram_end);
1435 }
1436
1437 static void ci_init_fps_limits(struct amdgpu_device *adev)
1438 {
1439         struct ci_power_info *pi = ci_get_pi(adev);
1440         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1441
1442         if (pi->caps_fps) {
1443                 u16 tmp;
1444
1445                 tmp = 45;
1446                 table->FpsHighT = cpu_to_be16(tmp);
1447
1448                 tmp = 30;
1449                 table->FpsLowT = cpu_to_be16(tmp);
1450         }
1451 }
1452
1453 static int ci_update_sclk_t(struct amdgpu_device *adev)
1454 {
1455         struct ci_power_info *pi = ci_get_pi(adev);
1456         int ret = 0;
1457         u32 low_sclk_interrupt_t = 0;
1458
1459         if (pi->caps_sclk_throttle_low_notification) {
1460                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1461
1462                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1463                                            pi->dpm_table_start +
1464                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1465                                            (u8 *)&low_sclk_interrupt_t,
1466                                            sizeof(u32), pi->sram_end);
1467
1468         }
1469
1470         return ret;
1471 }
1472
1473 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1474 {
1475         struct ci_power_info *pi = ci_get_pi(adev);
1476         u16 leakage_id, virtual_voltage_id;
1477         u16 vddc, vddci;
1478         int i;
1479
1480         pi->vddc_leakage.count = 0;
1481         pi->vddci_leakage.count = 0;
1482
1483         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1484                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1485                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1486                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1487                                 continue;
1488                         if (vddc != 0 && vddc != virtual_voltage_id) {
1489                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1490                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1491                                 pi->vddc_leakage.count++;
1492                         }
1493                 }
1494         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1495                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1496                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1497                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1498                                                                                      virtual_voltage_id,
1499                                                                                      leakage_id) == 0) {
1500                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1501                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1502                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1503                                         pi->vddc_leakage.count++;
1504                                 }
1505                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1506                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1507                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1508                                         pi->vddci_leakage.count++;
1509                                 }
1510                         }
1511                 }
1512         }
1513 }
1514
1515 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1516 {
1517         struct ci_power_info *pi = ci_get_pi(adev);
1518         bool want_thermal_protection;
1519         enum amdgpu_dpm_event_src dpm_event_src;
1520         u32 tmp;
1521
1522         switch (sources) {
1523         case 0:
1524         default:
1525                 want_thermal_protection = false;
1526                 break;
1527         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1528                 want_thermal_protection = true;
1529                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1530                 break;
1531         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1532                 want_thermal_protection = true;
1533                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1534                 break;
1535         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1536               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1537                 want_thermal_protection = true;
1538                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1539                 break;
1540         }
1541
1542         if (want_thermal_protection) {
1543 #if 0
1544                 /* XXX: need to figure out how to handle this properly */
1545                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1546                 tmp &= DPM_EVENT_SRC_MASK;
1547                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1548                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1549 #endif
1550
1551                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1552                 if (pi->thermal_protection)
1553                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1554                 else
1555                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1556                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1557         } else {
1558                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1559                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1560                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1561         }
1562 }
1563
1564 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1565                                            enum amdgpu_dpm_auto_throttle_src source,
1566                                            bool enable)
1567 {
1568         struct ci_power_info *pi = ci_get_pi(adev);
1569
1570         if (enable) {
1571                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1572                         pi->active_auto_throttle_sources |= 1 << source;
1573                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1574                 }
1575         } else {
1576                 if (pi->active_auto_throttle_sources & (1 << source)) {
1577                         pi->active_auto_throttle_sources &= ~(1 << source);
1578                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1579                 }
1580         }
1581 }
1582
1583 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1584 {
1585         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1586                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1587 }
1588
1589 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1590 {
1591         struct ci_power_info *pi = ci_get_pi(adev);
1592         PPSMC_Result smc_result;
1593
1594         if (!pi->need_update_smu7_dpm_table)
1595                 return 0;
1596
1597         if ((!pi->sclk_dpm_key_disabled) &&
1598             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1599                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1600                 if (smc_result != PPSMC_Result_OK)
1601                         return -EINVAL;
1602         }
1603
1604         if ((!pi->mclk_dpm_key_disabled) &&
1605             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1606                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1607                 if (smc_result != PPSMC_Result_OK)
1608                         return -EINVAL;
1609         }
1610
1611         pi->need_update_smu7_dpm_table = 0;
1612         return 0;
1613 }
1614
1615 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1616 {
1617         struct ci_power_info *pi = ci_get_pi(adev);
1618         PPSMC_Result smc_result;
1619
1620         if (enable) {
1621                 if (!pi->sclk_dpm_key_disabled) {
1622                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1623                         if (smc_result != PPSMC_Result_OK)
1624                                 return -EINVAL;
1625                 }
1626
1627                 if (!pi->mclk_dpm_key_disabled) {
1628                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1629                         if (smc_result != PPSMC_Result_OK)
1630                                 return -EINVAL;
1631
1632                         WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1633                                         ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1634
1635                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1636                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1637                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1638
1639                         udelay(10);
1640
1641                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1642                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1643                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1644                 }
1645         } else {
1646                 if (!pi->sclk_dpm_key_disabled) {
1647                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1648                         if (smc_result != PPSMC_Result_OK)
1649                                 return -EINVAL;
1650                 }
1651
1652                 if (!pi->mclk_dpm_key_disabled) {
1653                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1654                         if (smc_result != PPSMC_Result_OK)
1655                                 return -EINVAL;
1656                 }
1657         }
1658
1659         return 0;
1660 }
1661
1662 static int ci_start_dpm(struct amdgpu_device *adev)
1663 {
1664         struct ci_power_info *pi = ci_get_pi(adev);
1665         PPSMC_Result smc_result;
1666         int ret;
1667         u32 tmp;
1668
1669         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1670         tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1671         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1672
1673         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1674         tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1675         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1676
1677         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1678
1679         WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1680
1681         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1682         if (smc_result != PPSMC_Result_OK)
1683                 return -EINVAL;
1684
1685         ret = ci_enable_sclk_mclk_dpm(adev, true);
1686         if (ret)
1687                 return ret;
1688
1689         if (!pi->pcie_dpm_key_disabled) {
1690                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1691                 if (smc_result != PPSMC_Result_OK)
1692                         return -EINVAL;
1693         }
1694
1695         return 0;
1696 }
1697
1698 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1699 {
1700         struct ci_power_info *pi = ci_get_pi(adev);
1701         PPSMC_Result smc_result;
1702
1703         if (!pi->need_update_smu7_dpm_table)
1704                 return 0;
1705
1706         if ((!pi->sclk_dpm_key_disabled) &&
1707             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1708                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1709                 if (smc_result != PPSMC_Result_OK)
1710                         return -EINVAL;
1711         }
1712
1713         if ((!pi->mclk_dpm_key_disabled) &&
1714             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1715                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1716                 if (smc_result != PPSMC_Result_OK)
1717                         return -EINVAL;
1718         }
1719
1720         return 0;
1721 }
1722
1723 static int ci_stop_dpm(struct amdgpu_device *adev)
1724 {
1725         struct ci_power_info *pi = ci_get_pi(adev);
1726         PPSMC_Result smc_result;
1727         int ret;
1728         u32 tmp;
1729
1730         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1731         tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1732         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1733
1734         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1735         tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1736         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1737
1738         if (!pi->pcie_dpm_key_disabled) {
1739                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1740                 if (smc_result != PPSMC_Result_OK)
1741                         return -EINVAL;
1742         }
1743
1744         ret = ci_enable_sclk_mclk_dpm(adev, false);
1745         if (ret)
1746                 return ret;
1747
1748         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1749         if (smc_result != PPSMC_Result_OK)
1750                 return -EINVAL;
1751
1752         return 0;
1753 }
1754
1755 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1756 {
1757         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1758
1759         if (enable)
1760                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1761         else
1762                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1763         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1764 }
1765
1766 #if 0
1767 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1768                                         bool ac_power)
1769 {
1770         struct ci_power_info *pi = ci_get_pi(adev);
1771         struct amdgpu_cac_tdp_table *cac_tdp_table =
1772                 adev->pm.dpm.dyn_state.cac_tdp_table;
1773         u32 power_limit;
1774
1775         if (ac_power)
1776                 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1777         else
1778                 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1779
1780         ci_set_power_limit(adev, power_limit);
1781
1782         if (pi->caps_automatic_dc_transition) {
1783                 if (ac_power)
1784                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1785                 else
1786                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1787         }
1788
1789         return 0;
1790 }
1791 #endif
1792
1793 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1794                                                       PPSMC_Msg msg, u32 parameter)
1795 {
1796         WREG32(mmSMC_MSG_ARG_0, parameter);
1797         return amdgpu_ci_send_msg_to_smc(adev, msg);
1798 }
1799
1800 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1801                                                         PPSMC_Msg msg, u32 *parameter)
1802 {
1803         PPSMC_Result smc_result;
1804
1805         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1806
1807         if ((smc_result == PPSMC_Result_OK) && parameter)
1808                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1809
1810         return smc_result;
1811 }
1812
1813 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1814 {
1815         struct ci_power_info *pi = ci_get_pi(adev);
1816
1817         if (!pi->sclk_dpm_key_disabled) {
1818                 PPSMC_Result smc_result =
1819                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1820                 if (smc_result != PPSMC_Result_OK)
1821                         return -EINVAL;
1822         }
1823
1824         return 0;
1825 }
1826
1827 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1828 {
1829         struct ci_power_info *pi = ci_get_pi(adev);
1830
1831         if (!pi->mclk_dpm_key_disabled) {
1832                 PPSMC_Result smc_result =
1833                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1834                 if (smc_result != PPSMC_Result_OK)
1835                         return -EINVAL;
1836         }
1837
1838         return 0;
1839 }
1840
1841 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1842 {
1843         struct ci_power_info *pi = ci_get_pi(adev);
1844
1845         if (!pi->pcie_dpm_key_disabled) {
1846                 PPSMC_Result smc_result =
1847                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1848                 if (smc_result != PPSMC_Result_OK)
1849                         return -EINVAL;
1850         }
1851
1852         return 0;
1853 }
1854
1855 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1856 {
1857         struct ci_power_info *pi = ci_get_pi(adev);
1858
1859         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1860                 PPSMC_Result smc_result =
1861                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1862                 if (smc_result != PPSMC_Result_OK)
1863                         return -EINVAL;
1864         }
1865
1866         return 0;
1867 }
1868
1869 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1870                                        u32 target_tdp)
1871 {
1872         PPSMC_Result smc_result =
1873                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1874         if (smc_result != PPSMC_Result_OK)
1875                 return -EINVAL;
1876         return 0;
1877 }
1878
1879 #if 0
1880 static int ci_set_boot_state(struct amdgpu_device *adev)
1881 {
1882         return ci_enable_sclk_mclk_dpm(adev, false);
1883 }
1884 #endif
1885
1886 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1887 {
1888         u32 sclk_freq;
1889         PPSMC_Result smc_result =
1890                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1891                                                     PPSMC_MSG_API_GetSclkFrequency,
1892                                                     &sclk_freq);
1893         if (smc_result != PPSMC_Result_OK)
1894                 sclk_freq = 0;
1895
1896         return sclk_freq;
1897 }
1898
1899 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1900 {
1901         u32 mclk_freq;
1902         PPSMC_Result smc_result =
1903                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1904                                                     PPSMC_MSG_API_GetMclkFrequency,
1905                                                     &mclk_freq);
1906         if (smc_result != PPSMC_Result_OK)
1907                 mclk_freq = 0;
1908
1909         return mclk_freq;
1910 }
1911
1912 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1913 {
1914         int i;
1915
1916         amdgpu_ci_program_jump_on_start(adev);
1917         amdgpu_ci_start_smc_clock(adev);
1918         amdgpu_ci_start_smc(adev);
1919         for (i = 0; i < adev->usec_timeout; i++) {
1920                 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1921                         break;
1922         }
1923 }
1924
1925 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1926 {
1927         amdgpu_ci_reset_smc(adev);
1928         amdgpu_ci_stop_smc_clock(adev);
1929 }
1930
1931 static int ci_process_firmware_header(struct amdgpu_device *adev)
1932 {
1933         struct ci_power_info *pi = ci_get_pi(adev);
1934         u32 tmp;
1935         int ret;
1936
1937         ret = amdgpu_ci_read_smc_sram_dword(adev,
1938                                      SMU7_FIRMWARE_HEADER_LOCATION +
1939                                      offsetof(SMU7_Firmware_Header, DpmTable),
1940                                      &tmp, pi->sram_end);
1941         if (ret)
1942                 return ret;
1943
1944         pi->dpm_table_start = tmp;
1945
1946         ret = amdgpu_ci_read_smc_sram_dword(adev,
1947                                      SMU7_FIRMWARE_HEADER_LOCATION +
1948                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1949                                      &tmp, pi->sram_end);
1950         if (ret)
1951                 return ret;
1952
1953         pi->soft_regs_start = tmp;
1954
1955         ret = amdgpu_ci_read_smc_sram_dword(adev,
1956                                      SMU7_FIRMWARE_HEADER_LOCATION +
1957                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1958                                      &tmp, pi->sram_end);
1959         if (ret)
1960                 return ret;
1961
1962         pi->mc_reg_table_start = tmp;
1963
1964         ret = amdgpu_ci_read_smc_sram_dword(adev,
1965                                      SMU7_FIRMWARE_HEADER_LOCATION +
1966                                      offsetof(SMU7_Firmware_Header, FanTable),
1967                                      &tmp, pi->sram_end);
1968         if (ret)
1969                 return ret;
1970
1971         pi->fan_table_start = tmp;
1972
1973         ret = amdgpu_ci_read_smc_sram_dword(adev,
1974                                      SMU7_FIRMWARE_HEADER_LOCATION +
1975                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1976                                      &tmp, pi->sram_end);
1977         if (ret)
1978                 return ret;
1979
1980         pi->arb_table_start = tmp;
1981
1982         return 0;
1983 }
1984
1985 static void ci_read_clock_registers(struct amdgpu_device *adev)
1986 {
1987         struct ci_power_info *pi = ci_get_pi(adev);
1988
1989         pi->clock_registers.cg_spll_func_cntl =
1990                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1991         pi->clock_registers.cg_spll_func_cntl_2 =
1992                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1993         pi->clock_registers.cg_spll_func_cntl_3 =
1994                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1995         pi->clock_registers.cg_spll_func_cntl_4 =
1996                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1997         pi->clock_registers.cg_spll_spread_spectrum =
1998                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1999         pi->clock_registers.cg_spll_spread_spectrum_2 =
2000                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
2001         pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
2002         pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
2003         pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
2004         pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
2005         pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
2006         pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
2007         pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
2008         pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
2009         pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2010 }
2011
2012 static void ci_init_sclk_t(struct amdgpu_device *adev)
2013 {
2014         struct ci_power_info *pi = ci_get_pi(adev);
2015
2016         pi->low_sclk_interrupt_t = 0;
2017 }
2018
2019 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2020                                          bool enable)
2021 {
2022         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2023
2024         if (enable)
2025                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2026         else
2027                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2028         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2029 }
2030
2031 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2032 {
2033         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2034
2035         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2036
2037         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2038 }
2039
2040 #if 0
2041 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2042 {
2043
2044         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2045
2046         udelay(25000);
2047
2048         return 0;
2049 }
2050
2051 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2052 {
2053         int i;
2054
2055         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2056
2057         udelay(7000);
2058
2059         for (i = 0; i < adev->usec_timeout; i++) {
2060                 if (RREG32(mmSMC_RESP_0) == 1)
2061                         break;
2062                 udelay(1000);
2063         }
2064
2065         return 0;
2066 }
2067 #endif
2068
2069 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2070                                         bool has_display)
2071 {
2072         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2073
2074         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2075 }
2076
2077 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2078                                       bool enable)
2079 {
2080         struct ci_power_info *pi = ci_get_pi(adev);
2081
2082         if (enable) {
2083                 if (pi->caps_sclk_ds) {
2084                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2085                                 return -EINVAL;
2086                 } else {
2087                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2088                                 return -EINVAL;
2089                 }
2090         } else {
2091                 if (pi->caps_sclk_ds) {
2092                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2093                                 return -EINVAL;
2094                 }
2095         }
2096
2097         return 0;
2098 }
2099
2100 static void ci_program_display_gap(struct amdgpu_device *adev)
2101 {
2102         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2103         u32 pre_vbi_time_in_us;
2104         u32 frame_time_in_us;
2105         u32 ref_clock = adev->clock.spll.reference_freq;
2106         u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2107         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2108
2109         tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2110         if (adev->pm.dpm.new_active_crtc_count > 0)
2111                 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2112         else
2113                 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2114         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2115
2116         if (refresh_rate == 0)
2117                 refresh_rate = 60;
2118         if (vblank_time == 0xffffffff)
2119                 vblank_time = 500;
2120         frame_time_in_us = 1000000 / refresh_rate;
2121         pre_vbi_time_in_us =
2122                 frame_time_in_us - 200 - vblank_time;
2123         tmp = pre_vbi_time_in_us * (ref_clock / 100);
2124
2125         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2126         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2127         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2128
2129
2130         ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2131
2132 }
2133
2134 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2135 {
2136         struct ci_power_info *pi = ci_get_pi(adev);
2137         u32 tmp;
2138
2139         if (enable) {
2140                 if (pi->caps_sclk_ss_support) {
2141                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2142                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2143                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2144                 }
2145         } else {
2146                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2147                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2148                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2149
2150                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2151                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2152                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2153         }
2154 }
2155
2156 static void ci_program_sstp(struct amdgpu_device *adev)
2157 {
2158         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2159         ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2160          (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2161 }
2162
2163 static void ci_enable_display_gap(struct amdgpu_device *adev)
2164 {
2165         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2166
2167         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2168                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2169         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2170                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2171
2172         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2173 }
2174
2175 static void ci_program_vc(struct amdgpu_device *adev)
2176 {
2177         u32 tmp;
2178
2179         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2180         tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2181         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2182
2183         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2184         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2185         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2186         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2187         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2188         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2189         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2190         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2191 }
2192
2193 static void ci_clear_vc(struct amdgpu_device *adev)
2194 {
2195         u32 tmp;
2196
2197         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2198         tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2199         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2200
2201         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2202         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2203         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2204         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2205         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2206         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2207         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2208         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2209 }
2210
2211 static int ci_upload_firmware(struct amdgpu_device *adev)
2212 {
2213         int i, ret;
2214
2215         if (amdgpu_ci_is_smc_running(adev)) {
2216                 DRM_INFO("smc is running, no need to load smc firmware\n");
2217                 return 0;
2218         }
2219
2220         for (i = 0; i < adev->usec_timeout; i++) {
2221                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2222                         break;
2223         }
2224         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2225
2226         amdgpu_ci_stop_smc_clock(adev);
2227         amdgpu_ci_reset_smc(adev);
2228
2229         ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
2230
2231         return ret;
2232
2233 }
2234
2235 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2236                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2237                                      struct atom_voltage_table *voltage_table)
2238 {
2239         u32 i;
2240
2241         if (voltage_dependency_table == NULL)
2242                 return -EINVAL;
2243
2244         voltage_table->mask_low = 0;
2245         voltage_table->phase_delay = 0;
2246
2247         voltage_table->count = voltage_dependency_table->count;
2248         for (i = 0; i < voltage_table->count; i++) {
2249                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2250                 voltage_table->entries[i].smio_low = 0;
2251         }
2252
2253         return 0;
2254 }
2255
2256 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2257 {
2258         struct ci_power_info *pi = ci_get_pi(adev);
2259         int ret;
2260
2261         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2262                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2263                                                         VOLTAGE_OBJ_GPIO_LUT,
2264                                                         &pi->vddc_voltage_table);
2265                 if (ret)
2266                         return ret;
2267         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2268                 ret = ci_get_svi2_voltage_table(adev,
2269                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2270                                                 &pi->vddc_voltage_table);
2271                 if (ret)
2272                         return ret;
2273         }
2274
2275         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2276                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2277                                                          &pi->vddc_voltage_table);
2278
2279         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2280                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2281                                                         VOLTAGE_OBJ_GPIO_LUT,
2282                                                         &pi->vddci_voltage_table);
2283                 if (ret)
2284                         return ret;
2285         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2286                 ret = ci_get_svi2_voltage_table(adev,
2287                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2288                                                 &pi->vddci_voltage_table);
2289                 if (ret)
2290                         return ret;
2291         }
2292
2293         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2294                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2295                                                          &pi->vddci_voltage_table);
2296
2297         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2298                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2299                                                         VOLTAGE_OBJ_GPIO_LUT,
2300                                                         &pi->mvdd_voltage_table);
2301                 if (ret)
2302                         return ret;
2303         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2304                 ret = ci_get_svi2_voltage_table(adev,
2305                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2306                                                 &pi->mvdd_voltage_table);
2307                 if (ret)
2308                         return ret;
2309         }
2310
2311         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2312                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2313                                                          &pi->mvdd_voltage_table);
2314
2315         return 0;
2316 }
2317
2318 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2319                                           struct atom_voltage_table_entry *voltage_table,
2320                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2321 {
2322         int ret;
2323
2324         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2325                                             &smc_voltage_table->StdVoltageHiSidd,
2326                                             &smc_voltage_table->StdVoltageLoSidd);
2327
2328         if (ret) {
2329                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2330                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2331         }
2332
2333         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2334         smc_voltage_table->StdVoltageHiSidd =
2335                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2336         smc_voltage_table->StdVoltageLoSidd =
2337                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2338 }
2339
2340 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2341                                       SMU7_Discrete_DpmTable *table)
2342 {
2343         struct ci_power_info *pi = ci_get_pi(adev);
2344         unsigned int count;
2345
2346         table->VddcLevelCount = pi->vddc_voltage_table.count;
2347         for (count = 0; count < table->VddcLevelCount; count++) {
2348                 ci_populate_smc_voltage_table(adev,
2349                                               &pi->vddc_voltage_table.entries[count],
2350                                               &table->VddcLevel[count]);
2351
2352                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2353                         table->VddcLevel[count].Smio |=
2354                                 pi->vddc_voltage_table.entries[count].smio_low;
2355                 else
2356                         table->VddcLevel[count].Smio = 0;
2357         }
2358         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2359
2360         return 0;
2361 }
2362
2363 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2364                                        SMU7_Discrete_DpmTable *table)
2365 {
2366         unsigned int count;
2367         struct ci_power_info *pi = ci_get_pi(adev);
2368
2369         table->VddciLevelCount = pi->vddci_voltage_table.count;
2370         for (count = 0; count < table->VddciLevelCount; count++) {
2371                 ci_populate_smc_voltage_table(adev,
2372                                               &pi->vddci_voltage_table.entries[count],
2373                                               &table->VddciLevel[count]);
2374
2375                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2376                         table->VddciLevel[count].Smio |=
2377                                 pi->vddci_voltage_table.entries[count].smio_low;
2378                 else
2379                         table->VddciLevel[count].Smio = 0;
2380         }
2381         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2382
2383         return 0;
2384 }
2385
2386 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2387                                       SMU7_Discrete_DpmTable *table)
2388 {
2389         struct ci_power_info *pi = ci_get_pi(adev);
2390         unsigned int count;
2391
2392         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2393         for (count = 0; count < table->MvddLevelCount; count++) {
2394                 ci_populate_smc_voltage_table(adev,
2395                                               &pi->mvdd_voltage_table.entries[count],
2396                                               &table->MvddLevel[count]);
2397
2398                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2399                         table->MvddLevel[count].Smio |=
2400                                 pi->mvdd_voltage_table.entries[count].smio_low;
2401                 else
2402                         table->MvddLevel[count].Smio = 0;
2403         }
2404         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2405
2406         return 0;
2407 }
2408
2409 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2410                                           SMU7_Discrete_DpmTable *table)
2411 {
2412         int ret;
2413
2414         ret = ci_populate_smc_vddc_table(adev, table);
2415         if (ret)
2416                 return ret;
2417
2418         ret = ci_populate_smc_vddci_table(adev, table);
2419         if (ret)
2420                 return ret;
2421
2422         ret = ci_populate_smc_mvdd_table(adev, table);
2423         if (ret)
2424                 return ret;
2425
2426         return 0;
2427 }
2428
2429 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2430                                   SMU7_Discrete_VoltageLevel *voltage)
2431 {
2432         struct ci_power_info *pi = ci_get_pi(adev);
2433         u32 i = 0;
2434
2435         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2436                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2437                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2438                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2439                                 break;
2440                         }
2441                 }
2442
2443                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2444                         return -EINVAL;
2445         }
2446
2447         return -EINVAL;
2448 }
2449
2450 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2451                                          struct atom_voltage_table_entry *voltage_table,
2452                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2453 {
2454         u16 v_index, idx;
2455         bool voltage_found = false;
2456         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2457         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2458
2459         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2460                 return -EINVAL;
2461
2462         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2463                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2464                         if (voltage_table->value ==
2465                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2466                                 voltage_found = true;
2467                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2468                                         idx = v_index;
2469                                 else
2470                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2471                                 *std_voltage_lo_sidd =
2472                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2473                                 *std_voltage_hi_sidd =
2474                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2475                                 break;
2476                         }
2477                 }
2478
2479                 if (!voltage_found) {
2480                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2481                                 if (voltage_table->value <=
2482                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2483                                         voltage_found = true;
2484                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2485                                                 idx = v_index;
2486                                         else
2487                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2488                                         *std_voltage_lo_sidd =
2489                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2490                                         *std_voltage_hi_sidd =
2491                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2492                                         break;
2493                                 }
2494                         }
2495                 }
2496         }
2497
2498         return 0;
2499 }
2500
2501 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2502                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2503                                                   u32 sclk,
2504                                                   u32 *phase_shedding)
2505 {
2506         unsigned int i;
2507
2508         *phase_shedding = 1;
2509
2510         for (i = 0; i < limits->count; i++) {
2511                 if (sclk < limits->entries[i].sclk) {
2512                         *phase_shedding = i;
2513                         break;
2514                 }
2515         }
2516 }
2517
2518 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2519                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2520                                                   u32 mclk,
2521                                                   u32 *phase_shedding)
2522 {
2523         unsigned int i;
2524
2525         *phase_shedding = 1;
2526
2527         for (i = 0; i < limits->count; i++) {
2528                 if (mclk < limits->entries[i].mclk) {
2529                         *phase_shedding = i;
2530                         break;
2531                 }
2532         }
2533 }
2534
2535 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2536 {
2537         struct ci_power_info *pi = ci_get_pi(adev);
2538         u32 tmp;
2539         int ret;
2540
2541         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2542                                      &tmp, pi->sram_end);
2543         if (ret)
2544                 return ret;
2545
2546         tmp &= 0x00FFFFFF;
2547         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2548
2549         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2550                                        tmp, pi->sram_end);
2551 }
2552
2553 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2554                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2555                                          u32 clock, u32 *voltage)
2556 {
2557         u32 i = 0;
2558
2559         if (allowed_clock_voltage_table->count == 0)
2560                 return -EINVAL;
2561
2562         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2563                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2564                         *voltage = allowed_clock_voltage_table->entries[i].v;
2565                         return 0;
2566                 }
2567         }
2568
2569         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2570
2571         return 0;
2572 }
2573
2574 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2575 {
2576         u32 i;
2577         u32 tmp;
2578         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2579
2580         if (sclk < min)
2581                 return 0;
2582
2583         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2584                 tmp = sclk >> i;
2585                 if (tmp >= min || i == 0)
2586                         break;
2587         }
2588
2589         return (u8)i;
2590 }
2591
2592 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2593 {
2594         return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2595 }
2596
2597 static int ci_reset_to_default(struct amdgpu_device *adev)
2598 {
2599         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2600                 0 : -EINVAL;
2601 }
2602
2603 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2604 {
2605         u32 tmp;
2606
2607         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2608
2609         if (tmp == MC_CG_ARB_FREQ_F0)
2610                 return 0;
2611
2612         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2613 }
2614
2615 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2616                                         const u32 engine_clock,
2617                                         const u32 memory_clock,
2618                                         u32 *dram_timimg2)
2619 {
2620         bool patch;
2621         u32 tmp, tmp2;
2622
2623         tmp = RREG32(mmMC_SEQ_MISC0);
2624         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2625
2626         if (patch &&
2627             ((adev->pdev->device == 0x67B0) ||
2628              (adev->pdev->device == 0x67B1))) {
2629                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2630                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2631                         *dram_timimg2 &= ~0x00ff0000;
2632                         *dram_timimg2 |= tmp2 << 16;
2633                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2634                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2635                         *dram_timimg2 &= ~0x00ff0000;
2636                         *dram_timimg2 |= tmp2 << 16;
2637                 }
2638         }
2639 }
2640
2641 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2642                                                 u32 sclk,
2643                                                 u32 mclk,
2644                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2645 {
2646         u32 dram_timing;
2647         u32 dram_timing2;
2648         u32 burst_time;
2649
2650         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2651
2652         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2653         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2654         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2655
2656         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2657
2658         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2659         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2660         arb_regs->McArbBurstTime = (u8)burst_time;
2661
2662         return 0;
2663 }
2664
2665 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2666 {
2667         struct ci_power_info *pi = ci_get_pi(adev);
2668         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2669         u32 i, j;
2670         int ret =  0;
2671
2672         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2673
2674         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2675                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2676                         ret = ci_populate_memory_timing_parameters(adev,
2677                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2678                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2679                                                                    &arb_regs.entries[i][j]);
2680                         if (ret)
2681                                 break;
2682                 }
2683         }
2684
2685         if (ret == 0)
2686                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2687                                            pi->arb_table_start,
2688                                            (u8 *)&arb_regs,
2689                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2690                                            pi->sram_end);
2691
2692         return ret;
2693 }
2694
2695 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2696 {
2697         struct ci_power_info *pi = ci_get_pi(adev);
2698
2699         if (pi->need_update_smu7_dpm_table == 0)
2700                 return 0;
2701
2702         return ci_do_program_memory_timing_parameters(adev);
2703 }
2704
2705 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2706                                           struct amdgpu_ps *amdgpu_boot_state)
2707 {
2708         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2709         struct ci_power_info *pi = ci_get_pi(adev);
2710         u32 level = 0;
2711
2712         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2713                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2714                     boot_state->performance_levels[0].sclk) {
2715                         pi->smc_state_table.GraphicsBootLevel = level;
2716                         break;
2717                 }
2718         }
2719
2720         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2721                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2722                     boot_state->performance_levels[0].mclk) {
2723                         pi->smc_state_table.MemoryBootLevel = level;
2724                         break;
2725                 }
2726         }
2727 }
2728
2729 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2730 {
2731         u32 i;
2732         u32 mask_value = 0;
2733
2734         for (i = dpm_table->count; i > 0; i--) {
2735                 mask_value = mask_value << 1;
2736                 if (dpm_table->dpm_levels[i-1].enabled)
2737                         mask_value |= 0x1;
2738                 else
2739                         mask_value &= 0xFFFFFFFE;
2740         }
2741
2742         return mask_value;
2743 }
2744
2745 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2746                                        SMU7_Discrete_DpmTable *table)
2747 {
2748         struct ci_power_info *pi = ci_get_pi(adev);
2749         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2750         u32 i;
2751
2752         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2753                 table->LinkLevel[i].PcieGenSpeed =
2754                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2755                 table->LinkLevel[i].PcieLaneCount =
2756                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2757                 table->LinkLevel[i].EnabledForActivity = 1;
2758                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2759                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2760         }
2761
2762         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2763         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2764                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2765 }
2766
2767 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2768                                      SMU7_Discrete_DpmTable *table)
2769 {
2770         u32 count;
2771         struct atom_clock_dividers dividers;
2772         int ret = -EINVAL;
2773
2774         table->UvdLevelCount =
2775                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2776
2777         for (count = 0; count < table->UvdLevelCount; count++) {
2778                 table->UvdLevel[count].VclkFrequency =
2779                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2780                 table->UvdLevel[count].DclkFrequency =
2781                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2782                 table->UvdLevel[count].MinVddc =
2783                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2784                 table->UvdLevel[count].MinVddcPhases = 1;
2785
2786                 ret = amdgpu_atombios_get_clock_dividers(adev,
2787                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2788                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2789                 if (ret)
2790                         return ret;
2791
2792                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2793
2794                 ret = amdgpu_atombios_get_clock_dividers(adev,
2795                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2796                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2797                 if (ret)
2798                         return ret;
2799
2800                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2801
2802                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2803                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2804                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2805         }
2806
2807         return ret;
2808 }
2809
2810 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2811                                      SMU7_Discrete_DpmTable *table)
2812 {
2813         u32 count;
2814         struct atom_clock_dividers dividers;
2815         int ret = -EINVAL;
2816
2817         table->VceLevelCount =
2818                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2819
2820         for (count = 0; count < table->VceLevelCount; count++) {
2821                 table->VceLevel[count].Frequency =
2822                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2823                 table->VceLevel[count].MinVoltage =
2824                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2825                 table->VceLevel[count].MinPhases = 1;
2826
2827                 ret = amdgpu_atombios_get_clock_dividers(adev,
2828                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2829                                                          table->VceLevel[count].Frequency, false, &dividers);
2830                 if (ret)
2831                         return ret;
2832
2833                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2834
2835                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2836                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2837         }
2838
2839         return ret;
2840
2841 }
2842
2843 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2844                                      SMU7_Discrete_DpmTable *table)
2845 {
2846         u32 count;
2847         struct atom_clock_dividers dividers;
2848         int ret = -EINVAL;
2849
2850         table->AcpLevelCount = (u8)
2851                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2852
2853         for (count = 0; count < table->AcpLevelCount; count++) {
2854                 table->AcpLevel[count].Frequency =
2855                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2856                 table->AcpLevel[count].MinVoltage =
2857                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2858                 table->AcpLevel[count].MinPhases = 1;
2859
2860                 ret = amdgpu_atombios_get_clock_dividers(adev,
2861                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2862                                                          table->AcpLevel[count].Frequency, false, &dividers);
2863                 if (ret)
2864                         return ret;
2865
2866                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2867
2868                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2869                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2870         }
2871
2872         return ret;
2873 }
2874
2875 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2876                                       SMU7_Discrete_DpmTable *table)
2877 {
2878         u32 count;
2879         struct atom_clock_dividers dividers;
2880         int ret = -EINVAL;
2881
2882         table->SamuLevelCount =
2883                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2884
2885         for (count = 0; count < table->SamuLevelCount; count++) {
2886                 table->SamuLevel[count].Frequency =
2887                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2888                 table->SamuLevel[count].MinVoltage =
2889                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2890                 table->SamuLevel[count].MinPhases = 1;
2891
2892                 ret = amdgpu_atombios_get_clock_dividers(adev,
2893                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2894                                                          table->SamuLevel[count].Frequency, false, &dividers);
2895                 if (ret)
2896                         return ret;
2897
2898                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2899
2900                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2901                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2902         }
2903
2904         return ret;
2905 }
2906
2907 static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2908                                     u32 memory_clock,
2909                                     SMU7_Discrete_MemoryLevel *mclk,
2910                                     bool strobe_mode,
2911                                     bool dll_state_on)
2912 {
2913         struct ci_power_info *pi = ci_get_pi(adev);
2914         u32  dll_cntl = pi->clock_registers.dll_cntl;
2915         u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2916         u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2917         u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2918         u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2919         u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2920         u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2921         u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2922         u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2923         struct atom_mpll_param mpll_param;
2924         int ret;
2925
2926         ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2927         if (ret)
2928                 return ret;
2929
2930         mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2931         mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2932
2933         mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2934                         MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2935         mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2936                 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2937                 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2938
2939         mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2940         mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2941
2942         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2943                 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2944                                 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2945                 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2946                                 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2947         }
2948
2949         if (pi->caps_mclk_ss_support) {
2950                 struct amdgpu_atom_ss ss;
2951                 u32 freq_nom;
2952                 u32 tmp;
2953                 u32 reference_clock = adev->clock.mpll.reference_freq;
2954
2955                 if (mpll_param.qdr == 1)
2956                         freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2957                 else
2958                         freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2959
2960                 tmp = (freq_nom / reference_clock);
2961                 tmp = tmp * tmp;
2962                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2963                                                      ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2964                         u32 clks = reference_clock * 5 / ss.rate;
2965                         u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2966
2967                         mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2968                         mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2969
2970                         mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2971                         mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2972                 }
2973         }
2974
2975         mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2976         mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2977
2978         if (dll_state_on)
2979                 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2980                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2981         else
2982                 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2983                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2984
2985         mclk->MclkFrequency = memory_clock;
2986         mclk->MpllFuncCntl = mpll_func_cntl;
2987         mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2988         mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2989         mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2990         mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2991         mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2992         mclk->DllCntl = dll_cntl;
2993         mclk->MpllSs1 = mpll_ss1;
2994         mclk->MpllSs2 = mpll_ss2;
2995
2996         return 0;
2997 }
2998
2999 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3000                                            u32 memory_clock,
3001                                            SMU7_Discrete_MemoryLevel *memory_level)
3002 {
3003         struct ci_power_info *pi = ci_get_pi(adev);
3004         int ret;
3005         bool dll_state_on;
3006
3007         if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
3008                 ret = ci_get_dependency_volt_by_clk(adev,
3009                                                     &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3010                                                     memory_clock, &memory_level->MinVddc);
3011                 if (ret)
3012                         return ret;
3013         }
3014
3015         if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3016                 ret = ci_get_dependency_volt_by_clk(adev,
3017                                                     &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3018                                                     memory_clock, &memory_level->MinVddci);
3019                 if (ret)
3020                         return ret;
3021         }
3022
3023         if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3024                 ret = ci_get_dependency_volt_by_clk(adev,
3025                                                     &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3026                                                     memory_clock, &memory_level->MinMvdd);
3027                 if (ret)
3028                         return ret;
3029         }
3030
3031         memory_level->MinVddcPhases = 1;
3032
3033         if (pi->vddc_phase_shed_control)
3034                 ci_populate_phase_value_based_on_mclk(adev,
3035                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3036                                                       memory_clock,
3037                                                       &memory_level->MinVddcPhases);
3038
3039         memory_level->EnabledForThrottle = 1;
3040         memory_level->UpH = 0;
3041         memory_level->DownH = 100;
3042         memory_level->VoltageDownH = 0;
3043         memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3044
3045         memory_level->StutterEnable = false;
3046         memory_level->StrobeEnable = false;
3047         memory_level->EdcReadEnable = false;
3048         memory_level->EdcWriteEnable = false;
3049         memory_level->RttEnable = false;
3050
3051         memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3052
3053         if (pi->mclk_stutter_mode_threshold &&
3054             (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3055             (!pi->uvd_enabled) &&
3056             (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3057             (adev->pm.dpm.new_active_crtc_count <= 2))
3058                 memory_level->StutterEnable = true;
3059
3060         if (pi->mclk_strobe_mode_threshold &&
3061             (memory_clock <= pi->mclk_strobe_mode_threshold))
3062                 memory_level->StrobeEnable = 1;
3063
3064         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3065                 memory_level->StrobeRatio =
3066                         ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3067                 if (pi->mclk_edc_enable_threshold &&
3068                     (memory_clock > pi->mclk_edc_enable_threshold))
3069                         memory_level->EdcReadEnable = true;
3070
3071                 if (pi->mclk_edc_wr_enable_threshold &&
3072                     (memory_clock > pi->mclk_edc_wr_enable_threshold))
3073                         memory_level->EdcWriteEnable = true;
3074
3075                 if (memory_level->StrobeEnable) {
3076                         if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3077                             ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3078                                 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3079                         else
3080                                 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
3081                 } else {
3082                         dll_state_on = pi->dll_default_on;
3083                 }
3084         } else {
3085                 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3086                 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3087         }
3088
3089         ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3090         if (ret)
3091                 return ret;
3092
3093         memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3094         memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3095         memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3096         memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3097
3098         memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3099         memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3100         memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3101         memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3102         memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3103         memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3104         memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3105         memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3106         memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3107         memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3108         memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3109
3110         return 0;
3111 }
3112
3113 static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3114                                       SMU7_Discrete_DpmTable *table)
3115 {
3116         struct ci_power_info *pi = ci_get_pi(adev);
3117         struct atom_clock_dividers dividers;
3118         SMU7_Discrete_VoltageLevel voltage_level;
3119         u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3120         u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3121         u32 dll_cntl = pi->clock_registers.dll_cntl;
3122         u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3123         int ret;
3124
3125         table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3126
3127         if (pi->acpi_vddc)
3128                 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3129         else
3130                 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3131
3132         table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3133
3134         table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3135
3136         ret = amdgpu_atombios_get_clock_dividers(adev,
3137                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3138                                                  table->ACPILevel.SclkFrequency, false, &dividers);
3139         if (ret)
3140                 return ret;
3141
3142         table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3143         table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3144         table->ACPILevel.DeepSleepDivId = 0;
3145
3146         spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3147         spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3148
3149         spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3150         spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3151
3152         table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3153         table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3154         table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3155         table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3156         table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3157         table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3158         table->ACPILevel.CcPwrDynRm = 0;
3159         table->ACPILevel.CcPwrDynRm1 = 0;
3160
3161         table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3162         table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3163         table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3164         table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3165         table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3166         table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3167         table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3168         table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3169         table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3170         table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3171         table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3172
3173         table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3174         table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3175
3176         if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3177                 if (pi->acpi_vddci)
3178                         table->MemoryACPILevel.MinVddci =
3179                                 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3180                 else
3181                         table->MemoryACPILevel.MinVddci =
3182                                 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3183         }
3184
3185         if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3186                 table->MemoryACPILevel.MinMvdd = 0;
3187         else
3188                 table->MemoryACPILevel.MinMvdd =
3189                         cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3190
3191         mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3192                 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3193         mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3194                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3195
3196         dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3197
3198         table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3199         table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3200         table->MemoryACPILevel.MpllAdFuncCntl =
3201                 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3202         table->MemoryACPILevel.MpllDqFuncCntl =
3203                 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3204         table->MemoryACPILevel.MpllFuncCntl =
3205                 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3206         table->MemoryACPILevel.MpllFuncCntl_1 =
3207                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3208         table->MemoryACPILevel.MpllFuncCntl_2 =
3209                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3210         table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3211         table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3212
3213         table->MemoryACPILevel.EnabledForThrottle = 0;
3214         table->MemoryACPILevel.EnabledForActivity = 0;
3215         table->MemoryACPILevel.UpH = 0;
3216         table->MemoryACPILevel.DownH = 100;
3217         table->MemoryACPILevel.VoltageDownH = 0;
3218         table->MemoryACPILevel.ActivityLevel =
3219                 cpu_to_be16((u16)pi->mclk_activity_target);
3220
3221         table->MemoryACPILevel.StutterEnable = false;
3222         table->MemoryACPILevel.StrobeEnable = false;
3223         table->MemoryACPILevel.EdcReadEnable = false;
3224         table->MemoryACPILevel.EdcWriteEnable = false;
3225         table->MemoryACPILevel.RttEnable = false;
3226
3227         return 0;
3228 }
3229
3230
3231 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3232 {
3233         struct ci_power_info *pi = ci_get_pi(adev);
3234         struct ci_ulv_parm *ulv = &pi->ulv;
3235
3236         if (ulv->supported) {
3237                 if (enable)
3238                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3239                                 0 : -EINVAL;
3240                 else
3241                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3242                                 0 : -EINVAL;
3243         }
3244
3245         return 0;
3246 }
3247
3248 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3249                                  SMU7_Discrete_Ulv *state)
3250 {
3251         struct ci_power_info *pi = ci_get_pi(adev);
3252         u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3253
3254         state->CcPwrDynRm = 0;
3255         state->CcPwrDynRm1 = 0;
3256
3257         if (ulv_voltage == 0) {
3258                 pi->ulv.supported = false;
3259                 return 0;
3260         }
3261
3262         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3263                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3264                         state->VddcOffset = 0;
3265                 else
3266                         state->VddcOffset =
3267                                 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3268         } else {
3269                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3270                         state->VddcOffsetVid = 0;
3271                 else
3272                         state->VddcOffsetVid = (u8)
3273                                 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3274                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3275         }
3276         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3277
3278         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3279         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3280         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3281
3282         return 0;
3283 }
3284
3285 static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3286                                     u32 engine_clock,
3287                                     SMU7_Discrete_GraphicsLevel *sclk)
3288 {
3289         struct ci_power_info *pi = ci_get_pi(adev);
3290         struct atom_clock_dividers dividers;
3291         u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3292         u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3293         u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3294         u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3295         u32 reference_clock = adev->clock.spll.reference_freq;
3296         u32 reference_divider;
3297         u32 fbdiv;
3298         int ret;
3299
3300         ret = amdgpu_atombios_get_clock_dividers(adev,
3301                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3302                                                  engine_clock, false, &dividers);
3303         if (ret)
3304                 return ret;
3305
3306         reference_divider = 1 + dividers.ref_div;
3307         fbdiv = dividers.fb_div & 0x3FFFFFF;
3308
3309         spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3310         spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3311         spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3312
3313         if (pi->caps_sclk_ss_support) {
3314                 struct amdgpu_atom_ss ss;
3315                 u32 vco_freq = engine_clock * dividers.post_div;
3316
3317                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3318                                                      ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3319                         u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3320                         u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3321
3322                         cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3323                         cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3324                         cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3325
3326                         cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3327                         cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3328                 }
3329         }
3330
3331         sclk->SclkFrequency = engine_clock;
3332         sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3333         sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3334         sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3335         sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
3336         sclk->SclkDid = (u8)dividers.post_divider;
3337
3338         return 0;
3339 }
3340
3341 static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3342                                             u32 engine_clock,
3343                                             u16 sclk_activity_level_t,
3344                                             SMU7_Discrete_GraphicsLevel *graphic_level)
3345 {
3346         struct ci_power_info *pi = ci_get_pi(adev);
3347         int ret;
3348
3349         ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3350         if (ret)
3351                 return ret;
3352
3353         ret = ci_get_dependency_volt_by_clk(adev,
3354                                             &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3355                                             engine_clock, &graphic_level->MinVddc);
3356         if (ret)
3357                 return ret;
3358
3359         graphic_level->SclkFrequency = engine_clock;
3360
3361         graphic_level->Flags =  0;
3362         graphic_level->MinVddcPhases = 1;
3363
3364         if (pi->vddc_phase_shed_control)
3365                 ci_populate_phase_value_based_on_sclk(adev,
3366                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3367                                                       engine_clock,
3368                                                       &graphic_level->MinVddcPhases);
3369
3370         graphic_level->ActivityLevel = sclk_activity_level_t;
3371
3372         graphic_level->CcPwrDynRm = 0;
3373         graphic_level->CcPwrDynRm1 = 0;
3374         graphic_level->EnabledForThrottle = 1;
3375         graphic_level->UpH = 0;
3376         graphic_level->DownH = 0;
3377         graphic_level->VoltageDownH = 0;
3378         graphic_level->PowerThrottle = 0;
3379
3380         if (pi->caps_sclk_ds)
3381                 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3382                                                                                    CISLAND_MINIMUM_ENGINE_CLOCK);
3383
3384         graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3385
3386         graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3387         graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3388         graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3389         graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3390         graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3391         graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3392         graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3393         graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3394         graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3395         graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3396         graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3397
3398         return 0;
3399 }
3400
3401 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3402 {
3403         struct ci_power_info *pi = ci_get_pi(adev);
3404         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3405         u32 level_array_address = pi->dpm_table_start +
3406                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3407         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3408                 SMU7_MAX_LEVELS_GRAPHICS;
3409         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3410         u32 i, ret;
3411
3412         memset(levels, 0, level_array_size);
3413
3414         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3415                 ret = ci_populate_single_graphic_level(adev,
3416                                                        dpm_table->sclk_table.dpm_levels[i].value,
3417                                                        (u16)pi->activity_target[i],
3418                                                        &pi->smc_state_table.GraphicsLevel[i]);
3419                 if (ret)
3420                         return ret;
3421                 if (i > 1)
3422                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3423                 if (i == (dpm_table->sclk_table.count - 1))
3424                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3425                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3426         }
3427         pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3428
3429         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3430         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3431                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3432
3433         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3434                                    (u8 *)levels, level_array_size,
3435                                    pi->sram_end);
3436         if (ret)
3437                 return ret;
3438
3439         return 0;
3440 }
3441
3442 static int ci_populate_ulv_state(struct amdgpu_device *adev,
3443                                  SMU7_Discrete_Ulv *ulv_level)
3444 {
3445         return ci_populate_ulv_level(adev, ulv_level);
3446 }
3447
3448 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3449 {
3450         struct ci_power_info *pi = ci_get_pi(adev);
3451         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3452         u32 level_array_address = pi->dpm_table_start +
3453                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3454         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3455                 SMU7_MAX_LEVELS_MEMORY;
3456         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3457         u32 i, ret;
3458
3459         memset(levels, 0, level_array_size);
3460
3461         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3462                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3463                         return -EINVAL;
3464                 ret = ci_populate_single_memory_level(adev,
3465                                                       dpm_table->mclk_table.dpm_levels[i].value,
3466                                                       &pi->smc_state_table.MemoryLevel[i]);
3467                 if (ret)
3468                         return ret;
3469         }
3470
3471         pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3472
3473         if ((dpm_table->mclk_table.count >= 2) &&
3474             ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3475                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3476                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3477                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3478                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3479         }
3480
3481         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3482
3483         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3484         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3485                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3486
3487         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3488                 PPSMC_DISPLAY_WATERMARK_HIGH;
3489
3490         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3491                                    (u8 *)levels, level_array_size,
3492                                    pi->sram_end);
3493         if (ret)
3494                 return ret;
3495
3496         return 0;
3497 }
3498
3499 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3500                                       struct ci_single_dpm_table* dpm_table,
3501                                       u32 count)
3502 {
3503         u32 i;
3504
3505         dpm_table->count = count;
3506         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3507                 dpm_table->dpm_levels[i].enabled = false;
3508 }
3509
3510 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3511                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3512 {
3513         dpm_table->dpm_levels[index].value = pcie_gen;
3514         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3515         dpm_table->dpm_levels[index].enabled = true;
3516 }
3517
3518 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3519 {
3520         struct ci_power_info *pi = ci_get_pi(adev);
3521
3522         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3523                 return -EINVAL;
3524
3525         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3526                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3527                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3528         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3529                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3530                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3531         }
3532
3533         ci_reset_single_dpm_table(adev,
3534                                   &pi->dpm_table.pcie_speed_table,
3535                                   SMU7_MAX_LEVELS_LINK);
3536
3537         if (adev->asic_type == CHIP_BONAIRE)
3538                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3539                                           pi->pcie_gen_powersaving.min,
3540                                           pi->pcie_lane_powersaving.max);
3541         else
3542                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3543                                           pi->pcie_gen_powersaving.min,
3544                                           pi->pcie_lane_powersaving.min);
3545         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3546                                   pi->pcie_gen_performance.min,
3547                                   pi->pcie_lane_performance.min);
3548         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3549                                   pi->pcie_gen_powersaving.min,
3550                                   pi->pcie_lane_powersaving.max);
3551         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3552                                   pi->pcie_gen_performance.min,
3553                                   pi->pcie_lane_performance.max);
3554         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3555                                   pi->pcie_gen_powersaving.max,
3556                                   pi->pcie_lane_powersaving.max);
3557         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3558                                   pi->pcie_gen_performance.max,
3559                                   pi->pcie_lane_performance.max);
3560
3561         pi->dpm_table.pcie_speed_table.count = 6;
3562
3563         return 0;
3564 }
3565
3566 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3567 {
3568         struct ci_power_info *pi = ci_get_pi(adev);
3569         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3570                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3571         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3572                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3573         struct amdgpu_cac_leakage_table *std_voltage_table =
3574                 &adev->pm.dpm.dyn_state.cac_leakage_table;
3575         u32 i;
3576
3577         if (allowed_sclk_vddc_table == NULL)
3578                 return -EINVAL;
3579         if (allowed_sclk_vddc_table->count < 1)
3580                 return -EINVAL;
3581         if (allowed_mclk_table == NULL)
3582                 return -EINVAL;
3583         if (allowed_mclk_table->count < 1)
3584                 return -EINVAL;
3585
3586         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3587
3588         ci_reset_single_dpm_table(adev,
3589                                   &pi->dpm_table.sclk_table,
3590                                   SMU7_MAX_LEVELS_GRAPHICS);
3591         ci_reset_single_dpm_table(adev,
3592                                   &pi->dpm_table.mclk_table,
3593                                   SMU7_MAX_LEVELS_MEMORY);
3594         ci_reset_single_dpm_table(adev,
3595                                   &pi->dpm_table.vddc_table,
3596                                   SMU7_MAX_LEVELS_VDDC);
3597         ci_reset_single_dpm_table(adev,
3598                                   &pi->dpm_table.vddci_table,
3599                                   SMU7_MAX_LEVELS_VDDCI);
3600         ci_reset_single_dpm_table(adev,
3601                                   &pi->dpm_table.mvdd_table,
3602                                   SMU7_MAX_LEVELS_MVDD);
3603
3604         pi->dpm_table.sclk_table.count = 0;
3605         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3606                 if ((i == 0) ||
3607                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3608                      allowed_sclk_vddc_table->entries[i].clk)) {
3609                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3610                                 allowed_sclk_vddc_table->entries[i].clk;
3611                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3612                                 (i == 0) ? true : false;
3613                         pi->dpm_table.sclk_table.count++;
3614                 }
3615         }
3616
3617         pi->dpm_table.mclk_table.count = 0;
3618         for (i = 0; i < allowed_mclk_table->count; i++) {
3619                 if ((i == 0) ||
3620                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3621                      allowed_mclk_table->entries[i].clk)) {
3622                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3623                                 allowed_mclk_table->entries[i].clk;
3624                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3625                                 (i == 0) ? true : false;
3626                         pi->dpm_table.mclk_table.count++;
3627                 }
3628         }
3629
3630         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3631                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3632                         allowed_sclk_vddc_table->entries[i].v;
3633                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3634                         std_voltage_table->entries[i].leakage;
3635                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3636         }
3637         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3638
3639         allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3640         if (allowed_mclk_table) {
3641                 for (i = 0; i < allowed_mclk_table->count; i++) {
3642                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3643                                 allowed_mclk_table->entries[i].v;
3644                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3645                 }
3646                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3647         }
3648
3649         allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3650         if (allowed_mclk_table) {
3651                 for (i = 0; i < allowed_mclk_table->count; i++) {
3652                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3653                                 allowed_mclk_table->entries[i].v;
3654                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3655                 }
3656                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3657         }
3658
3659         ci_setup_default_pcie_tables(adev);
3660
3661         /* save a copy of the default DPM table */
3662         memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3663                         sizeof(struct ci_dpm_table));
3664
3665         return 0;
3666 }
3667
3668 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3669                               u32 value, u32 *boot_level)
3670 {
3671         u32 i;
3672         int ret = -EINVAL;
3673
3674         for(i = 0; i < table->count; i++) {
3675                 if (value == table->dpm_levels[i].value) {
3676                         *boot_level = i;
3677                         ret = 0;
3678                 }
3679         }
3680
3681         return ret;
3682 }
3683
3684 static void ci_save_default_power_profile(struct amdgpu_device *adev)
3685 {
3686         struct ci_power_info *pi = ci_get_pi(adev);
3687         struct SMU7_Discrete_GraphicsLevel *levels =
3688                                 pi->smc_state_table.GraphicsLevel;
3689         uint32_t min_level = 0;
3690
3691         pi->default_gfx_power_profile.activity_threshold =
3692                         be16_to_cpu(levels[0].ActivityLevel);
3693         pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
3694         pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
3695         pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
3696
3697         pi->default_compute_power_profile = pi->default_gfx_power_profile;
3698         pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
3699
3700         /* Optimize compute power profile: Use only highest
3701          * 2 power levels (if more than 2 are available), Hysteresis:
3702          * 0ms up, 5ms down
3703          */
3704         if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
3705                 min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
3706         else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
3707                 min_level = 1;
3708         pi->default_compute_power_profile.min_sclk =
3709                         be32_to_cpu(levels[min_level].SclkFrequency);
3710
3711         pi->default_compute_power_profile.up_hyst = 0;
3712         pi->default_compute_power_profile.down_hyst = 5;
3713
3714         pi->gfx_power_profile = pi->default_gfx_power_profile;
3715         pi->compute_power_profile = pi->default_compute_power_profile;
3716 }
3717
3718 static int ci_init_smc_table(struct amdgpu_device *adev)
3719 {
3720         struct ci_power_info *pi = ci_get_pi(adev);
3721         struct ci_ulv_parm *ulv = &pi->ulv;
3722         struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3723         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3724         int ret;
3725
3726         ret = ci_setup_default_dpm_tables(adev);
3727         if (ret)
3728                 return ret;
3729
3730         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3731                 ci_populate_smc_voltage_tables(adev, table);
3732
3733         ci_init_fps_limits(adev);
3734
3735         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3736                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3737
3738         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3739                 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3740
3741         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3742                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3743
3744         if (ulv->supported) {
3745                 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3746                 if (ret)
3747                         return ret;
3748                 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3749         }
3750
3751         ret = ci_populate_all_graphic_levels(adev);
3752         if (ret)
3753                 return ret;
3754
3755         ret = ci_populate_all_memory_levels(adev);
3756         if (ret)
3757                 return ret;
3758
3759         ci_populate_smc_link_level(adev, table);
3760
3761         ret = ci_populate_smc_acpi_level(adev, table);
3762         if (ret)
3763                 return ret;
3764
3765         ret = ci_populate_smc_vce_level(adev, table);
3766         if (ret)
3767                 return ret;
3768
3769         ret = ci_populate_smc_acp_level(adev, table);
3770         if (ret)
3771                 return ret;
3772
3773         ret = ci_populate_smc_samu_level(adev, table);
3774         if (ret)
3775                 return ret;
3776
3777         ret = ci_do_program_memory_timing_parameters(adev);
3778         if (ret)
3779                 return ret;
3780
3781         ret = ci_populate_smc_uvd_level(adev, table);
3782         if (ret)
3783                 return ret;
3784
3785         table->UvdBootLevel  = 0;
3786         table->VceBootLevel  = 0;
3787         table->AcpBootLevel  = 0;
3788         table->SamuBootLevel  = 0;
3789         table->GraphicsBootLevel  = 0;
3790         table->MemoryBootLevel  = 0;
3791
3792         ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3793                                  pi->vbios_boot_state.sclk_bootup_value,
3794                                  (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3795
3796         ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3797                                  pi->vbios_boot_state.mclk_bootup_value,
3798                                  (u32 *)&pi->smc_state_table.MemoryBootLevel);
3799
3800         table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3801         table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3802         table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3803
3804         ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3805
3806         ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3807         if (ret)
3808                 return ret;
3809
3810         table->UVDInterval = 1;
3811         table->VCEInterval = 1;
3812         table->ACPInterval = 1;
3813         table->SAMUInterval = 1;
3814         table->GraphicsVoltageChangeEnable = 1;
3815         table->GraphicsThermThrottleEnable = 1;
3816         table->GraphicsInterval = 1;
3817         table->VoltageInterval = 1;
3818         table->ThermalInterval = 1;
3819         table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3820                                              CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3821         table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3822                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3823         table->MemoryVoltageChangeEnable = 1;
3824         table->MemoryInterval = 1;
3825         table->VoltageResponseTime = 0;
3826         table->VddcVddciDelta = 4000;
3827         table->PhaseResponseTime = 0;
3828         table->MemoryThermThrottleEnable = 1;
3829         table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3830         table->PCIeGenInterval = 1;
3831         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3832                 table->SVI2Enable  = 1;
3833         else
3834                 table->SVI2Enable  = 0;
3835
3836         table->ThermGpio = 17;
3837         table->SclkStepSize = 0x4000;
3838
3839         table->SystemFlags = cpu_to_be32(table->SystemFlags);
3840         table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3841         table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3842         table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3843         table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3844         table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3845         table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3846         table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3847         table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3848         table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3849         table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3850         table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3851         table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3852         table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3853
3854         ret = amdgpu_ci_copy_bytes_to_smc(adev,
3855                                    pi->dpm_table_start +
3856                                    offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3857                                    (u8 *)&table->SystemFlags,
3858                                    sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3859                                    pi->sram_end);
3860         if (ret)
3861                 return ret;
3862
3863         ci_save_default_power_profile(adev);
3864
3865         return 0;
3866 }
3867
3868 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3869                                       struct ci_single_dpm_table *dpm_table,
3870                                       u32 low_limit, u32 high_limit)
3871 {
3872         u32 i;
3873
3874         for (i = 0; i < dpm_table->count; i++) {
3875                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3876                     (dpm_table->dpm_levels[i].value > high_limit))
3877                         dpm_table->dpm_levels[i].enabled = false;
3878                 else
3879                         dpm_table->dpm_levels[i].enabled = true;
3880         }
3881 }
3882
3883 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3884                                     u32 speed_low, u32 lanes_low,
3885                                     u32 speed_high, u32 lanes_high)
3886 {
3887         struct ci_power_info *pi = ci_get_pi(adev);
3888         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3889         u32 i, j;
3890
3891         for (i = 0; i < pcie_table->count; i++) {
3892                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3893                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3894                     (pcie_table->dpm_levels[i].value > speed_high) ||
3895                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3896                         pcie_table->dpm_levels[i].enabled = false;
3897                 else
3898                         pcie_table->dpm_levels[i].enabled = true;
3899         }
3900
3901         for (i = 0; i < pcie_table->count; i++) {
3902                 if (pcie_table->dpm_levels[i].enabled) {
3903                         for (j = i + 1; j < pcie_table->count; j++) {
3904                                 if (pcie_table->dpm_levels[j].enabled) {
3905                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3906                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3907                                                 pcie_table->dpm_levels[j].enabled = false;
3908                                 }
3909                         }
3910                 }
3911         }
3912 }
3913
3914 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3915                               struct amdgpu_ps *amdgpu_state)
3916 {
3917         struct ci_ps *state = ci_get_ps(amdgpu_state);
3918         struct ci_power_info *pi = ci_get_pi(adev);
3919         u32 high_limit_count;
3920
3921         if (state->performance_level_count < 1)
3922                 return -EINVAL;
3923
3924         if (state->performance_level_count == 1)
3925                 high_limit_count = 0;
3926         else
3927                 high_limit_count = 1;
3928
3929         ci_trim_single_dpm_states(adev,
3930                                   &pi->dpm_table.sclk_table,
3931                                   state->performance_levels[0].sclk,
3932                                   state->performance_levels[high_limit_count].sclk);
3933
3934         ci_trim_single_dpm_states(adev,
3935                                   &pi->dpm_table.mclk_table,
3936                                   state->performance_levels[0].mclk,
3937                                   state->performance_levels[high_limit_count].mclk);
3938
3939         ci_trim_pcie_dpm_states(adev,
3940                                 state->performance_levels[0].pcie_gen,
3941                                 state->performance_levels[0].pcie_lane,
3942                                 state->performance_levels[high_limit_count].pcie_gen,
3943                                 state->performance_levels[high_limit_count].pcie_lane);
3944
3945         return 0;
3946 }
3947
3948 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3949 {
3950         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3951                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3952         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3953                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3954         u32 requested_voltage = 0;
3955         u32 i;
3956
3957         if (disp_voltage_table == NULL)
3958                 return -EINVAL;
3959         if (!disp_voltage_table->count)
3960                 return -EINVAL;
3961
3962         for (i = 0; i < disp_voltage_table->count; i++) {
3963                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3964                         requested_voltage = disp_voltage_table->entries[i].v;
3965         }
3966
3967         for (i = 0; i < vddc_table->count; i++) {
3968                 if (requested_voltage <= vddc_table->entries[i].v) {
3969                         requested_voltage = vddc_table->entries[i].v;
3970                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3971                                                                   PPSMC_MSG_VddC_Request,
3972                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3973                                 0 : -EINVAL;
3974                 }
3975         }
3976
3977         return -EINVAL;
3978 }
3979
3980 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3981 {
3982         struct ci_power_info *pi = ci_get_pi(adev);
3983         PPSMC_Result result;
3984
3985         ci_apply_disp_minimum_voltage_request(adev);
3986
3987         if (!pi->sclk_dpm_key_disabled) {
3988                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3989                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3990                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3991                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3992                         if (result != PPSMC_Result_OK)
3993                                 return -EINVAL;
3994                 }
3995         }
3996
3997         if (!pi->mclk_dpm_key_disabled) {
3998                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3999                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4000                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
4001                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4002                         if (result != PPSMC_Result_OK)
4003                                 return -EINVAL;
4004                 }
4005         }
4006
4007 #if 0
4008         if (!pi->pcie_dpm_key_disabled) {
4009                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4010                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4011                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
4012                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4013                         if (result != PPSMC_Result_OK)
4014                                 return -EINVAL;
4015                 }
4016         }
4017 #endif
4018
4019         return 0;
4020 }
4021
4022 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
4023                                                    struct amdgpu_ps *amdgpu_state)
4024 {
4025         struct ci_power_info *pi = ci_get_pi(adev);
4026         struct ci_ps *state = ci_get_ps(amdgpu_state);
4027         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
4028         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4029         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
4030         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4031         u32 i;
4032
4033         pi->need_update_smu7_dpm_table = 0;
4034
4035         for (i = 0; i < sclk_table->count; i++) {
4036                 if (sclk == sclk_table->dpm_levels[i].value)
4037                         break;
4038         }
4039
4040         if (i >= sclk_table->count) {
4041                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4042         } else {
4043                 /* XXX check display min clock requirements */
4044                 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
4045                         pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4046         }
4047
4048         for (i = 0; i < mclk_table->count; i++) {
4049                 if (mclk == mclk_table->dpm_levels[i].value)
4050                         break;
4051         }
4052
4053         if (i >= mclk_table->count)
4054                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4055
4056         if (adev->pm.dpm.current_active_crtc_count !=
4057             adev->pm.dpm.new_active_crtc_count)
4058                 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4059 }
4060
4061 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4062                                                        struct amdgpu_ps *amdgpu_state)
4063 {
4064         struct ci_power_info *pi = ci_get_pi(adev);
4065         struct ci_ps *state = ci_get_ps(amdgpu_state);
4066         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4067         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4068         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4069         int ret;
4070
4071         if (!pi->need_update_smu7_dpm_table)
4072                 return 0;
4073
4074         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4075                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4076
4077         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4078                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4079
4080         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4081                 ret = ci_populate_all_graphic_levels(adev);
4082                 if (ret)
4083                         return ret;
4084         }
4085
4086         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4087                 ret = ci_populate_all_memory_levels(adev);
4088                 if (ret)
4089                         return ret;
4090         }
4091
4092         return 0;
4093 }
4094
4095 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4096 {
4097         struct ci_power_info *pi = ci_get_pi(adev);
4098         const struct amdgpu_clock_and_voltage_limits *max_limits;
4099         int i;
4100
4101         if (adev->pm.dpm.ac_power)
4102                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4103         else
4104                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4105
4106         if (enable) {
4107                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4108
4109                 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4110                         if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4111                                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4112
4113                                 if (!pi->caps_uvd_dpm)
4114                                         break;
4115                         }
4116                 }
4117
4118                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4119                                                   PPSMC_MSG_UVDDPM_SetEnabledMask,
4120                                                   pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4121
4122                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4123                         pi->uvd_enabled = true;
4124                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4125                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4126                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4127                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4128                 }
4129         } else {
4130                 if (pi->uvd_enabled) {
4131                         pi->uvd_enabled = false;
4132                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4133                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4134                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4135                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4136                 }
4137         }
4138
4139         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4140                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4141                 0 : -EINVAL;
4142 }
4143
4144 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4145 {
4146         struct ci_power_info *pi = ci_get_pi(adev);
4147         const struct amdgpu_clock_and_voltage_limits *max_limits;
4148         int i;
4149
4150         if (adev->pm.dpm.ac_power)
4151                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4152         else
4153                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4154
4155         if (enable) {
4156                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4157                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4158                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4159                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4160
4161                                 if (!pi->caps_vce_dpm)
4162                                         break;
4163                         }
4164                 }
4165
4166                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4167                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4168                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4169         }
4170
4171         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4172                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4173                 0 : -EINVAL;
4174 }
4175
4176 #if 0
4177 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4178 {
4179         struct ci_power_info *pi = ci_get_pi(adev);
4180         const struct amdgpu_clock_and_voltage_limits *max_limits;
4181         int i;
4182
4183         if (adev->pm.dpm.ac_power)
4184                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4185         else
4186                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4187
4188         if (enable) {
4189                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4190                 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4191                         if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4192                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4193
4194                                 if (!pi->caps_samu_dpm)
4195                                         break;
4196                         }
4197                 }
4198
4199                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4200                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
4201                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4202         }
4203         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4204                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4205                 0 : -EINVAL;
4206 }
4207
4208 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4209 {
4210         struct ci_power_info *pi = ci_get_pi(adev);
4211         const struct amdgpu_clock_and_voltage_limits *max_limits;
4212         int i;
4213
4214         if (adev->pm.dpm.ac_power)
4215                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4216         else
4217                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4218
4219         if (enable) {
4220                 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4221                 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4222                         if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4223                                 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4224
4225                                 if (!pi->caps_acp_dpm)
4226                                         break;
4227                         }
4228                 }
4229
4230                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4231                                                   PPSMC_MSG_ACPDPM_SetEnabledMask,
4232                                                   pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4233         }
4234
4235         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4236                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4237                 0 : -EINVAL;
4238 }
4239 #endif
4240
4241 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4242 {
4243         struct ci_power_info *pi = ci_get_pi(adev);
4244         u32 tmp;
4245         int ret = 0;
4246
4247         if (!gate) {
4248                 /* turn the clocks on when decoding */
4249                 if (pi->caps_uvd_dpm ||
4250                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4251                         pi->smc_state_table.UvdBootLevel = 0;
4252                 else
4253                         pi->smc_state_table.UvdBootLevel =
4254                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4255
4256                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4257                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4258                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4259                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4260                 ret = ci_enable_uvd_dpm(adev, true);
4261         } else {
4262                 ret = ci_enable_uvd_dpm(adev, false);
4263                 if (ret)
4264                         return ret;
4265         }
4266
4267         return ret;
4268 }
4269
4270 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4271 {
4272         u8 i;
4273         u32 min_evclk = 30000; /* ??? */
4274         struct amdgpu_vce_clock_voltage_dependency_table *table =
4275                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4276
4277         for (i = 0; i < table->count; i++) {
4278                 if (table->entries[i].evclk >= min_evclk)
4279                         return i;
4280         }
4281
4282         return table->count - 1;
4283 }
4284
4285 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4286                              struct amdgpu_ps *amdgpu_new_state,
4287                              struct amdgpu_ps *amdgpu_current_state)
4288 {
4289         struct ci_power_info *pi = ci_get_pi(adev);
4290         int ret = 0;
4291         u32 tmp;
4292
4293         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4294                 if (amdgpu_new_state->evclk) {
4295                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4296                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4297                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4298                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4299                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4300
4301                         ret = ci_enable_vce_dpm(adev, true);
4302                 } else {
4303                         ret = ci_enable_vce_dpm(adev, false);
4304                         if (ret)
4305                                 return ret;
4306                 }
4307         }
4308         return ret;
4309 }
4310
4311 #if 0
4312 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4313 {
4314         return ci_enable_samu_dpm(adev, gate);
4315 }
4316
4317 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4318 {
4319         struct ci_power_info *pi = ci_get_pi(adev);
4320         u32 tmp;
4321
4322         if (!gate) {
4323                 pi->smc_state_table.AcpBootLevel = 0;
4324
4325                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4326                 tmp &= ~AcpBootLevel_MASK;
4327                 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4328                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4329         }
4330
4331         return ci_enable_acp_dpm(adev, !gate);
4332 }
4333 #endif
4334
4335 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4336                                              struct amdgpu_ps *amdgpu_state)
4337 {
4338         struct ci_power_info *pi = ci_get_pi(adev);
4339         int ret;
4340
4341         ret = ci_trim_dpm_states(adev, amdgpu_state);
4342         if (ret)
4343                 return ret;
4344
4345         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4346                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4347         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4348                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4349         pi->last_mclk_dpm_enable_mask =
4350                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4351         if (pi->uvd_enabled) {
4352                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4353                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4354         }
4355         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4356                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4357
4358         return 0;
4359 }
4360
4361 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4362                                        u32 level_mask)
4363 {
4364         u32 level = 0;
4365
4366         while ((level_mask & (1 << level)) == 0)
4367                 level++;
4368
4369         return level;
4370 }
4371
4372
4373 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4374                                           enum amd_dpm_forced_level level)
4375 {
4376         struct ci_power_info *pi = ci_get_pi(adev);
4377         u32 tmp, levels, i;
4378         int ret;
4379
4380         if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
4381                 if ((!pi->pcie_dpm_key_disabled) &&
4382                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4383                         levels = 0;
4384                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4385                         while (tmp >>= 1)
4386                                 levels++;
4387                         if (levels) {
4388                                 ret = ci_dpm_force_state_pcie(adev, level);
4389                                 if (ret)
4390                                         return ret;
4391                                 for (i = 0; i < adev->usec_timeout; i++) {
4392                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4393                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4394                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4395                                         if (tmp == levels)
4396                                                 break;
4397                                         udelay(1);
4398                                 }
4399                         }
4400                 }
4401                 if ((!pi->sclk_dpm_key_disabled) &&
4402                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4403                         levels = 0;
4404                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4405                         while (tmp >>= 1)
4406                                 levels++;
4407                         if (levels) {
4408                                 ret = ci_dpm_force_state_sclk(adev, levels);
4409                                 if (ret)
4410                                         return ret;
4411                                 for (i = 0; i < adev->usec_timeout; i++) {
4412                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4413                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4414                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4415                                         if (tmp == levels)
4416                                                 break;
4417                                         udelay(1);
4418                                 }
4419                         }
4420                 }
4421                 if ((!pi->mclk_dpm_key_disabled) &&
4422                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4423                         levels = 0;
4424                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4425                         while (tmp >>= 1)
4426                                 levels++;
4427                         if (levels) {
4428                                 ret = ci_dpm_force_state_mclk(adev, levels);
4429                                 if (ret)
4430                                         return ret;
4431                                 for (i = 0; i < adev->usec_timeout; i++) {
4432                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4433                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4434                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4435                                         if (tmp == levels)
4436                                                 break;
4437                                         udelay(1);
4438                                 }
4439                         }
4440                 }
4441         } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
4442                 if ((!pi->sclk_dpm_key_disabled) &&
4443                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4444                         levels = ci_get_lowest_enabled_level(adev,
4445                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4446                         ret = ci_dpm_force_state_sclk(adev, levels);
4447                         if (ret)
4448                                 return ret;
4449                         for (i = 0; i < adev->usec_timeout; i++) {
4450                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4451                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4452                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4453                                 if (tmp == levels)
4454                                         break;
4455                                 udelay(1);
4456                         }
4457                 }
4458                 if ((!pi->mclk_dpm_key_disabled) &&
4459                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4460                         levels = ci_get_lowest_enabled_level(adev,
4461                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4462                         ret = ci_dpm_force_state_mclk(adev, levels);
4463                         if (ret)
4464                                 return ret;
4465                         for (i = 0; i < adev->usec_timeout; i++) {
4466                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4467                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4468                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4469                                 if (tmp == levels)
4470                                         break;
4471                                 udelay(1);
4472                         }
4473                 }
4474                 if ((!pi->pcie_dpm_key_disabled) &&
4475                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4476                         levels = ci_get_lowest_enabled_level(adev,
4477                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4478                         ret = ci_dpm_force_state_pcie(adev, levels);
4479                         if (ret)
4480                                 return ret;
4481                         for (i = 0; i < adev->usec_timeout; i++) {
4482                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4483                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4484                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4485                                 if (tmp == levels)
4486                                         break;
4487                                 udelay(1);
4488                         }
4489                 }
4490         } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
4491                 if (!pi->pcie_dpm_key_disabled) {
4492                         PPSMC_Result smc_result;
4493
4494                         smc_result = amdgpu_ci_send_msg_to_smc(adev,
4495                                                                PPSMC_MSG_PCIeDPM_UnForceLevel);
4496                         if (smc_result != PPSMC_Result_OK)
4497                                 return -EINVAL;
4498                 }
4499                 ret = ci_upload_dpm_level_enable_mask(adev);
4500                 if (ret)
4501                         return ret;
4502         }
4503
4504         adev->pm.dpm.forced_level = level;
4505
4506         return 0;
4507 }
4508
4509 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4510                                        struct ci_mc_reg_table *table)
4511 {
4512         u8 i, j, k;
4513         u32 temp_reg;
4514
4515         for (i = 0, j = table->last; i < table->last; i++) {
4516                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4517                         return -EINVAL;
4518                 switch(table->mc_reg_address[i].s1) {
4519                 case mmMC_SEQ_MISC1:
4520                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4521                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4522                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4523                         for (k = 0; k < table->num_entries; k++) {
4524                                 table->mc_reg_table_entry[k].mc_data[j] =
4525                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4526                         }
4527                         j++;
4528                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4529                                 return -EINVAL;
4530
4531                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4532                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4533                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4534                         for (k = 0; k < table->num_entries; k++) {
4535                                 table->mc_reg_table_entry[k].mc_data[j] =
4536                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4537                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4538                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4539                         }
4540                         j++;
4541                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4542                                 return -EINVAL;
4543
4544                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4545                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4546                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4547                                 for (k = 0; k < table->num_entries; k++) {
4548                                         table->mc_reg_table_entry[k].mc_data[j] =
4549                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4550                                 }
4551                                 j++;
4552                                 if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4553                                         return -EINVAL;
4554                         }
4555                         break;
4556                 case mmMC_SEQ_RESERVE_M:
4557                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4558                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4559                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4560                         for (k = 0; k < table->num_entries; k++) {
4561                                 table->mc_reg_table_entry[k].mc_data[j] =
4562                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4563                         }
4564                         j++;
4565                         if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4566                                 return -EINVAL;
4567                         break;
4568                 default:
4569                         break;
4570                 }
4571
4572         }
4573
4574         table->last = j;
4575
4576         return 0;
4577 }
4578
4579 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4580 {
4581         bool result = true;
4582
4583         switch(in_reg) {
4584         case mmMC_SEQ_RAS_TIMING:
4585                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4586                 break;
4587         case mmMC_SEQ_DLL_STBY:
4588                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4589                 break;
4590         case mmMC_SEQ_G5PDX_CMD0:
4591                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4592                 break;
4593         case mmMC_SEQ_G5PDX_CMD1:
4594                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4595                 break;
4596         case mmMC_SEQ_G5PDX_CTRL:
4597                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4598                 break;
4599         case mmMC_SEQ_CAS_TIMING:
4600                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4601             break;
4602         case mmMC_SEQ_MISC_TIMING:
4603                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4604                 break;
4605         case mmMC_SEQ_MISC_TIMING2:
4606                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4607                 break;
4608         case mmMC_SEQ_PMG_DVS_CMD:
4609                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4610                 break;
4611         case mmMC_SEQ_PMG_DVS_CTL:
4612                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4613                 break;
4614         case mmMC_SEQ_RD_CTL_D0:
4615                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4616                 break;
4617         case mmMC_SEQ_RD_CTL_D1:
4618                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4619                 break;
4620         case mmMC_SEQ_WR_CTL_D0:
4621                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4622                 break;
4623         case mmMC_SEQ_WR_CTL_D1:
4624                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4625                 break;
4626         case mmMC_PMG_CMD_EMRS:
4627                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4628                 break;
4629         case mmMC_PMG_CMD_MRS:
4630                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4631                 break;
4632         case mmMC_PMG_CMD_MRS1:
4633                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4634                 break;
4635         case mmMC_SEQ_PMG_TIMING:
4636                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4637                 break;
4638         case mmMC_PMG_CMD_MRS2:
4639                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4640                 break;
4641         case mmMC_SEQ_WR_CTL_2:
4642                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4643                 break;
4644         default:
4645                 result = false;
4646                 break;
4647         }
4648
4649         return result;
4650 }
4651
4652 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4653 {
4654         u8 i, j;
4655
4656         for (i = 0; i < table->last; i++) {
4657                 for (j = 1; j < table->num_entries; j++) {
4658                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4659                             table->mc_reg_table_entry[j].mc_data[i]) {
4660                                 table->valid_flag |= 1 << i;
4661                                 break;
4662                         }
4663                 }
4664         }
4665 }
4666
4667 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4668 {
4669         u32 i;
4670         u16 address;
4671
4672         for (i = 0; i < table->last; i++) {
4673                 table->mc_reg_address[i].s0 =
4674                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4675                         address : table->mc_reg_address[i].s1;
4676         }
4677 }
4678
4679 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4680                                       struct ci_mc_reg_table *ci_table)
4681 {
4682         u8 i, j;
4683
4684         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4685                 return -EINVAL;
4686         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4687                 return -EINVAL;
4688
4689         for (i = 0; i < table->last; i++)
4690                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4691
4692         ci_table->last = table->last;
4693
4694         for (i = 0; i < table->num_entries; i++) {
4695                 ci_table->mc_reg_table_entry[i].mclk_max =
4696                         table->mc_reg_table_entry[i].mclk_max;
4697                 for (j = 0; j < table->last; j++)
4698                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4699                                 table->mc_reg_table_entry[i].mc_data[j];
4700         }
4701         ci_table->num_entries = table->num_entries;
4702
4703         return 0;
4704 }
4705
4706 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4707                                        struct ci_mc_reg_table *table)
4708 {
4709         u8 i, k;
4710         u32 tmp;
4711         bool patch;
4712
4713         tmp = RREG32(mmMC_SEQ_MISC0);
4714         patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4715
4716         if (patch &&
4717             ((adev->pdev->device == 0x67B0) ||
4718              (adev->pdev->device == 0x67B1))) {
4719                 for (i = 0; i < table->last; i++) {
4720                         if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4721                                 return -EINVAL;
4722                         switch (table->mc_reg_address[i].s1) {
4723                         case mmMC_SEQ_MISC1:
4724                                 for (k = 0; k < table->num_entries; k++) {
4725                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4726                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4727                                                 table->mc_reg_table_entry[k].mc_data[i] =
4728                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4729                                                         0x00000007;
4730                                 }
4731                                 break;
4732                         case mmMC_SEQ_WR_CTL_D0:
4733                                 for (k = 0; k < table->num_entries; k++) {
4734                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4735                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4736                                                 table->mc_reg_table_entry[k].mc_data[i] =
4737                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4738                                                         0x0000D0DD;
4739                                 }
4740                                 break;
4741                         case mmMC_SEQ_WR_CTL_D1:
4742                                 for (k = 0; k < table->num_entries; k++) {
4743                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4744                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4745                                                 table->mc_reg_table_entry[k].mc_data[i] =
4746                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4747                                                         0x0000D0DD;
4748                                 }
4749                                 break;
4750                         case mmMC_SEQ_WR_CTL_2:
4751                                 for (k = 0; k < table->num_entries; k++) {
4752                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4753                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4754                                                 table->mc_reg_table_entry[k].mc_data[i] = 0;
4755                                 }
4756                                 break;
4757                         case mmMC_SEQ_CAS_TIMING:
4758                                 for (k = 0; k < table->num_entries; k++) {
4759                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4760                                                 table->mc_reg_table_entry[k].mc_data[i] =
4761                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4762                                                         0x000C0140;
4763                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4764                                                 table->mc_reg_table_entry[k].mc_data[i] =
4765                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4766                                                         0x000C0150;
4767                                 }
4768                                 break;
4769                         case mmMC_SEQ_MISC_TIMING:
4770                                 for (k = 0; k < table->num_entries; k++) {
4771                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4772                                                 table->mc_reg_table_entry[k].mc_data[i] =
4773                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4774                                                         0x00000030;
4775                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4776                                                 table->mc_reg_table_entry[k].mc_data[i] =
4777                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4778                                                         0x00000035;
4779                                 }
4780                                 break;
4781                         default:
4782                                 break;
4783                         }
4784                 }
4785
4786                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4787                 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4788                 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4789                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4790                 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4791         }
4792
4793         return 0;
4794 }
4795
4796 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4797 {
4798         struct ci_power_info *pi = ci_get_pi(adev);
4799         struct atom_mc_reg_table *table;
4800         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4801         u8 module_index = ci_get_memory_module_index(adev);
4802         int ret;
4803
4804         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4805         if (!table)
4806                 return -ENOMEM;
4807
4808         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4809         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4810         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4811         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4812         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4813         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4814         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4815         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4816         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4817         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4818         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4819         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4820         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4821         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4822         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4823         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4824         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4825         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4826         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4827         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4828
4829         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4830         if (ret)
4831                 goto init_mc_done;
4832
4833         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4834         if (ret)
4835                 goto init_mc_done;
4836
4837         ci_set_s0_mc_reg_index(ci_table);
4838
4839         ret = ci_register_patching_mc_seq(adev, ci_table);
4840         if (ret)
4841                 goto init_mc_done;
4842
4843         ret = ci_set_mc_special_registers(adev, ci_table);
4844         if (ret)
4845                 goto init_mc_done;
4846
4847         ci_set_valid_flag(ci_table);
4848
4849 init_mc_done:
4850         kfree(table);
4851
4852         return ret;
4853 }
4854
4855 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4856                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4857 {
4858         struct ci_power_info *pi = ci_get_pi(adev);
4859         u32 i, j;
4860
4861         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4862                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4863                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4864                                 return -EINVAL;
4865                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4866                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4867                         i++;
4868                 }
4869         }
4870
4871         mc_reg_table->last = (u8)i;
4872
4873         return 0;
4874 }
4875
4876 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4877                                     SMU7_Discrete_MCRegisterSet *data,
4878                                     u32 num_entries, u32 valid_flag)
4879 {
4880         u32 i, j;
4881
4882         for (i = 0, j = 0; j < num_entries; j++) {
4883                 if (valid_flag & (1 << j)) {
4884                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4885                         i++;
4886                 }
4887         }
4888 }
4889
4890 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4891                                                  const u32 memory_clock,
4892                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4893 {
4894         struct ci_power_info *pi = ci_get_pi(adev);
4895         u32 i = 0;
4896
4897         for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4898                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4899                         break;
4900         }
4901
4902         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4903                 --i;
4904
4905         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4906                                 mc_reg_table_data, pi->mc_reg_table.last,
4907                                 pi->mc_reg_table.valid_flag);
4908 }
4909
4910 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4911                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4912 {
4913         struct ci_power_info *pi = ci_get_pi(adev);
4914         u32 i;
4915
4916         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4917                 ci_convert_mc_reg_table_entry_to_smc(adev,
4918                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4919                                                      &mc_reg_table->data[i]);
4920 }
4921
4922 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4923 {
4924         struct ci_power_info *pi = ci_get_pi(adev);
4925         int ret;
4926
4927         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4928
4929         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4930         if (ret)
4931                 return ret;
4932         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4933
4934         return amdgpu_ci_copy_bytes_to_smc(adev,
4935                                     pi->mc_reg_table_start,
4936                                     (u8 *)&pi->smc_mc_reg_table,
4937                                     sizeof(SMU7_Discrete_MCRegisters),
4938                                     pi->sram_end);
4939 }
4940
4941 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4942 {
4943         struct ci_power_info *pi = ci_get_pi(adev);
4944
4945         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4946                 return 0;
4947
4948         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4949
4950         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4951
4952         return amdgpu_ci_copy_bytes_to_smc(adev,
4953                                     pi->mc_reg_table_start +
4954                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4955                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4956                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4957                                     pi->dpm_table.mclk_table.count,
4958                                     pi->sram_end);
4959 }
4960
4961 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4962 {
4963         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4964
4965         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4966         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4967 }
4968
4969 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4970                                                       struct amdgpu_ps *amdgpu_state)
4971 {
4972         struct ci_ps *state = ci_get_ps(amdgpu_state);
4973         int i;
4974         u16 pcie_speed, max_speed = 0;
4975
4976         for (i = 0; i < state->performance_level_count; i++) {
4977                 pcie_speed = state->performance_levels[i].pcie_gen;
4978                 if (max_speed < pcie_speed)
4979                         max_speed = pcie_speed;
4980         }
4981
4982         return max_speed;
4983 }
4984
4985 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4986 {
4987         u32 speed_cntl = 0;
4988
4989         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4990                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4991         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4992
4993         return (u16)speed_cntl;
4994 }
4995
4996 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4997 {
4998         u32 link_width = 0;
4999
5000         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
5001                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
5002         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
5003
5004         switch (link_width) {
5005         case 1:
5006                 return 1;
5007         case 2:
5008                 return 2;
5009         case 3:
5010                 return 4;
5011         case 4:
5012                 return 8;
5013         case 0:
5014         case 6:
5015         default:
5016                 return 16;
5017         }
5018 }
5019
5020 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
5021                                                              struct amdgpu_ps *amdgpu_new_state,
5022                                                              struct amdgpu_ps *amdgpu_current_state)
5023 {
5024         struct ci_power_info *pi = ci_get_pi(adev);
5025         enum amdgpu_pcie_gen target_link_speed =
5026                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5027         enum amdgpu_pcie_gen current_link_speed;
5028
5029         if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
5030                 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
5031         else
5032                 current_link_speed = pi->force_pcie_gen;
5033
5034         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5035         pi->pspp_notify_required = false;
5036         if (target_link_speed > current_link_speed) {
5037                 switch (target_link_speed) {
5038 #ifdef CONFIG_ACPI
5039                 case AMDGPU_PCIE_GEN3:
5040                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
5041                                 break;
5042                         pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
5043                         if (current_link_speed == AMDGPU_PCIE_GEN2)
5044                                 break;
5045                 case AMDGPU_PCIE_GEN2:
5046                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5047                                 break;
5048 #endif
5049                 default:
5050                         pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5051                         break;
5052                 }
5053         } else {
5054                 if (target_link_speed < current_link_speed)
5055                         pi->pspp_notify_required = true;
5056         }
5057 }
5058
5059 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5060                                                            struct amdgpu_ps *amdgpu_new_state,
5061                                                            struct amdgpu_ps *amdgpu_current_state)
5062 {
5063         struct ci_power_info *pi = ci_get_pi(adev);
5064         enum amdgpu_pcie_gen target_link_speed =
5065                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5066         u8 request;
5067
5068         if (pi->pspp_notify_required) {
5069                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5070                         request = PCIE_PERF_REQ_PECI_GEN3;
5071                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5072                         request = PCIE_PERF_REQ_PECI_GEN2;
5073                 else
5074                         request = PCIE_PERF_REQ_PECI_GEN1;
5075
5076                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5077                     (ci_get_current_pcie_speed(adev) > 0))
5078                         return;
5079
5080 #ifdef CONFIG_ACPI
5081                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5082 #endif
5083         }
5084 }
5085
5086 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5087 {
5088         struct ci_power_info *pi = ci_get_pi(adev);
5089         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5090                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5091         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5092                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5093         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5094                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5095
5096         if (allowed_sclk_vddc_table == NULL)
5097                 return -EINVAL;
5098         if (allowed_sclk_vddc_table->count < 1)
5099                 return -EINVAL;
5100         if (allowed_mclk_vddc_table == NULL)
5101                 return -EINVAL;
5102         if (allowed_mclk_vddc_table->count < 1)
5103                 return -EINVAL;
5104         if (allowed_mclk_vddci_table == NULL)
5105                 return -EINVAL;
5106         if (allowed_mclk_vddci_table->count < 1)
5107                 return -EINVAL;
5108
5109         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5110         pi->max_vddc_in_pp_table =
5111                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5112
5113         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5114         pi->max_vddci_in_pp_table =
5115                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5116
5117         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5118                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5119         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5120                 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5121         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5122                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5123         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5124                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5125
5126         return 0;
5127 }
5128
5129 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5130 {
5131         struct ci_power_info *pi = ci_get_pi(adev);
5132         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5133         u32 leakage_index;
5134
5135         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5136                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5137                         *vddc = leakage_table->actual_voltage[leakage_index];
5138                         break;
5139                 }
5140         }
5141 }
5142
5143 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5144 {
5145         struct ci_power_info *pi = ci_get_pi(adev);
5146         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5147         u32 leakage_index;
5148
5149         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5150                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5151                         *vddci = leakage_table->actual_voltage[leakage_index];
5152                         break;
5153                 }
5154         }
5155 }
5156
5157 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5158                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5159 {
5160         u32 i;
5161
5162         if (table) {
5163                 for (i = 0; i < table->count; i++)
5164                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5165         }
5166 }
5167
5168 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5169                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5170 {
5171         u32 i;
5172
5173         if (table) {
5174                 for (i = 0; i < table->count; i++)
5175                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5176         }
5177 }
5178
5179 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5180                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5181 {
5182         u32 i;
5183
5184         if (table) {
5185                 for (i = 0; i < table->count; i++)
5186                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5187         }
5188 }
5189
5190 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5191                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5192 {
5193         u32 i;
5194
5195         if (table) {
5196                 for (i = 0; i < table->count; i++)
5197                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5198         }
5199 }
5200
5201 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5202                                                                    struct amdgpu_phase_shedding_limits_table *table)
5203 {
5204         u32 i;
5205
5206         if (table) {
5207                 for (i = 0; i < table->count; i++)
5208                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5209         }
5210 }
5211
5212 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5213                                                             struct amdgpu_clock_and_voltage_limits *table)
5214 {
5215         if (table) {
5216                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5217                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5218         }
5219 }
5220
5221 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5222                                                          struct amdgpu_cac_leakage_table *table)
5223 {
5224         u32 i;
5225
5226         if (table) {
5227                 for (i = 0; i < table->count; i++)
5228                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5229         }
5230 }
5231
5232 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5233 {
5234
5235         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5236                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5237         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5238                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5239         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5240                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5241         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5242                                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5243         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5244                                                                       &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5245         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5246                                                                       &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5247         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5248                                                                   &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5249         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5250                                                                   &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5251         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5252                                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5253         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5254                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5255         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5256                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5257         ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5258                                                      &adev->pm.dpm.dyn_state.cac_leakage_table);
5259
5260 }
5261
5262 static void ci_update_current_ps(struct amdgpu_device *adev,
5263                                  struct amdgpu_ps *rps)
5264 {
5265         struct ci_ps *new_ps = ci_get_ps(rps);
5266         struct ci_power_info *pi = ci_get_pi(adev);
5267
5268         pi->current_rps = *rps;
5269         pi->current_ps = *new_ps;
5270         pi->current_rps.ps_priv = &pi->current_ps;
5271         adev->pm.dpm.current_ps = &pi->current_rps;
5272 }
5273
5274 static void ci_update_requested_ps(struct amdgpu_device *adev,
5275                                    struct amdgpu_ps *rps)
5276 {
5277         struct ci_ps *new_ps = ci_get_ps(rps);
5278         struct ci_power_info *pi = ci_get_pi(adev);
5279
5280         pi->requested_rps = *rps;
5281         pi->requested_ps = *new_ps;
5282         pi->requested_rps.ps_priv = &pi->requested_ps;
5283         adev->pm.dpm.requested_ps = &pi->requested_rps;
5284 }
5285
5286 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5287 {
5288         struct ci_power_info *pi = ci_get_pi(adev);
5289         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5290         struct amdgpu_ps *new_ps = &requested_ps;
5291
5292         ci_update_requested_ps(adev, new_ps);
5293
5294         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5295
5296         return 0;
5297 }
5298
5299 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5300 {
5301         struct ci_power_info *pi = ci_get_pi(adev);
5302         struct amdgpu_ps *new_ps = &pi->requested_rps;
5303
5304         ci_update_current_ps(adev, new_ps);
5305 }
5306
5307
5308 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5309 {
5310         ci_read_clock_registers(adev);
5311         ci_enable_acpi_power_management(adev);
5312         ci_init_sclk_t(adev);
5313 }
5314
5315 static int ci_dpm_enable(struct amdgpu_device *adev)
5316 {
5317         struct ci_power_info *pi = ci_get_pi(adev);
5318         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5319         int ret;
5320
5321         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5322                 ci_enable_voltage_control(adev);
5323                 ret = ci_construct_voltage_tables(adev);
5324                 if (ret) {
5325                         DRM_ERROR("ci_construct_voltage_tables failed\n");
5326                         return ret;
5327                 }
5328         }
5329         if (pi->caps_dynamic_ac_timing) {
5330                 ret = ci_initialize_mc_reg_table(adev);
5331                 if (ret)
5332                         pi->caps_dynamic_ac_timing = false;
5333         }
5334         if (pi->dynamic_ss)
5335                 ci_enable_spread_spectrum(adev, true);
5336         if (pi->thermal_protection)
5337                 ci_enable_thermal_protection(adev, true);
5338         ci_program_sstp(adev);
5339         ci_enable_display_gap(adev);
5340         ci_program_vc(adev);
5341         ret = ci_upload_firmware(adev);
5342         if (ret) {
5343                 DRM_ERROR("ci_upload_firmware failed\n");
5344                 return ret;
5345         }
5346         ret = ci_process_firmware_header(adev);
5347         if (ret) {
5348                 DRM_ERROR("ci_process_firmware_header failed\n");
5349                 return ret;
5350         }
5351         ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5352         if (ret) {
5353                 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5354                 return ret;
5355         }
5356         ret = ci_init_smc_table(adev);
5357         if (ret) {
5358                 DRM_ERROR("ci_init_smc_table failed\n");
5359                 return ret;
5360         }
5361         ret = ci_init_arb_table_index(adev);
5362         if (ret) {
5363                 DRM_ERROR("ci_init_arb_table_index failed\n");
5364                 return ret;
5365         }
5366         if (pi->caps_dynamic_ac_timing) {
5367                 ret = ci_populate_initial_mc_reg_table(adev);
5368                 if (ret) {
5369                         DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5370                         return ret;
5371                 }
5372         }
5373         ret = ci_populate_pm_base(adev);
5374         if (ret) {
5375                 DRM_ERROR("ci_populate_pm_base failed\n");
5376                 return ret;
5377         }
5378         ci_dpm_start_smc(adev);
5379         ci_enable_vr_hot_gpio_interrupt(adev);
5380         ret = ci_notify_smc_display_change(adev, false);
5381         if (ret) {
5382                 DRM_ERROR("ci_notify_smc_display_change failed\n");
5383                 return ret;
5384         }
5385         ci_enable_sclk_control(adev, true);
5386         ret = ci_enable_ulv(adev, true);
5387         if (ret) {
5388                 DRM_ERROR("ci_enable_ulv failed\n");
5389                 return ret;
5390         }
5391         ret = ci_enable_ds_master_switch(adev, true);
5392         if (ret) {
5393                 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5394                 return ret;
5395         }
5396         ret = ci_start_dpm(adev);
5397         if (ret) {
5398                 DRM_ERROR("ci_start_dpm failed\n");
5399                 return ret;
5400         }
5401         ret = ci_enable_didt(adev, true);
5402         if (ret) {
5403                 DRM_ERROR("ci_enable_didt failed\n");
5404                 return ret;
5405         }
5406         ret = ci_enable_smc_cac(adev, true);
5407         if (ret) {
5408                 DRM_ERROR("ci_enable_smc_cac failed\n");
5409                 return ret;
5410         }
5411         ret = ci_enable_power_containment(adev, true);
5412         if (ret) {
5413                 DRM_ERROR("ci_enable_power_containment failed\n");
5414                 return ret;
5415         }
5416
5417         ret = ci_power_control_set_level(adev);
5418         if (ret) {
5419                 DRM_ERROR("ci_power_control_set_level failed\n");
5420                 return ret;
5421         }
5422
5423         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5424
5425         ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5426         if (ret) {
5427                 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5428                 return ret;
5429         }
5430
5431         ci_thermal_start_thermal_controller(adev);
5432
5433         ci_update_current_ps(adev, boot_ps);
5434
5435         return 0;
5436 }
5437
5438 static void ci_dpm_disable(struct amdgpu_device *adev)
5439 {
5440         struct ci_power_info *pi = ci_get_pi(adev);
5441         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5442
5443         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5444                        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5445         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5446                        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5447
5448         ci_dpm_powergate_uvd(adev, true);
5449
5450         if (!amdgpu_ci_is_smc_running(adev))
5451                 return;
5452
5453         ci_thermal_stop_thermal_controller(adev);
5454
5455         if (pi->thermal_protection)
5456                 ci_enable_thermal_protection(adev, false);
5457         ci_enable_power_containment(adev, false);
5458         ci_enable_smc_cac(adev, false);
5459         ci_enable_didt(adev, false);
5460         ci_enable_spread_spectrum(adev, false);
5461         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5462         ci_stop_dpm(adev);
5463         ci_enable_ds_master_switch(adev, false);
5464         ci_enable_ulv(adev, false);
5465         ci_clear_vc(adev);
5466         ci_reset_to_default(adev);
5467         ci_dpm_stop_smc(adev);
5468         ci_force_switch_to_arb_f0(adev);
5469         ci_enable_thermal_based_sclk_dpm(adev, false);
5470
5471         ci_update_current_ps(adev, boot_ps);
5472 }
5473
5474 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5475 {
5476         struct ci_power_info *pi = ci_get_pi(adev);
5477         struct amdgpu_ps *new_ps = &pi->requested_rps;
5478         struct amdgpu_ps *old_ps = &pi->current_rps;
5479         int ret;
5480
5481         ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5482         if (pi->pcie_performance_request)
5483                 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5484         ret = ci_freeze_sclk_mclk_dpm(adev);
5485         if (ret) {
5486                 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5487                 return ret;
5488         }
5489         ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5490         if (ret) {
5491                 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5492                 return ret;
5493         }
5494         ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5495         if (ret) {
5496                 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5497                 return ret;
5498         }
5499
5500         ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5501         if (ret) {
5502                 DRM_ERROR("ci_update_vce_dpm failed\n");
5503                 return ret;
5504         }
5505
5506         ret = ci_update_sclk_t(adev);
5507         if (ret) {
5508                 DRM_ERROR("ci_update_sclk_t failed\n");
5509                 return ret;
5510         }
5511         if (pi->caps_dynamic_ac_timing) {
5512                 ret = ci_update_and_upload_mc_reg_table(adev);
5513                 if (ret) {
5514                         DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5515                         return ret;
5516                 }
5517         }
5518         ret = ci_program_memory_timing_parameters(adev);
5519         if (ret) {
5520                 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5521                 return ret;
5522         }
5523         ret = ci_unfreeze_sclk_mclk_dpm(adev);
5524         if (ret) {
5525                 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5526                 return ret;
5527         }
5528         ret = ci_upload_dpm_level_enable_mask(adev);
5529         if (ret) {
5530                 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5531                 return ret;
5532         }
5533         if (pi->pcie_performance_request)
5534                 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5535
5536         return 0;
5537 }
5538
5539 #if 0
5540 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5541 {
5542         ci_set_boot_state(adev);
5543 }
5544 #endif
5545
5546 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5547 {
5548         ci_program_display_gap(adev);
5549 }
5550
5551 union power_info {
5552         struct _ATOM_POWERPLAY_INFO info;
5553         struct _ATOM_POWERPLAY_INFO_V2 info_2;
5554         struct _ATOM_POWERPLAY_INFO_V3 info_3;
5555         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5556         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5557         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5558 };
5559
5560 union pplib_clock_info {
5561         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5562         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5563         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5564         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5565         struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5566         struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5567 };
5568
5569 union pplib_power_state {
5570         struct _ATOM_PPLIB_STATE v1;
5571         struct _ATOM_PPLIB_STATE_V2 v2;
5572 };
5573
5574 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5575                                           struct amdgpu_ps *rps,
5576                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5577                                           u8 table_rev)
5578 {
5579         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5580         rps->class = le16_to_cpu(non_clock_info->usClassification);
5581         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5582
5583         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5584                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5585                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5586         } else {
5587                 rps->vclk = 0;
5588                 rps->dclk = 0;
5589         }
5590
5591         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5592                 adev->pm.dpm.boot_ps = rps;
5593         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5594                 adev->pm.dpm.uvd_ps = rps;
5595 }
5596
5597 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5598                                       struct amdgpu_ps *rps, int index,
5599                                       union pplib_clock_info *clock_info)
5600 {
5601         struct ci_power_info *pi = ci_get_pi(adev);
5602         struct ci_ps *ps = ci_get_ps(rps);
5603         struct ci_pl *pl = &ps->performance_levels[index];
5604
5605         ps->performance_level_count = index + 1;
5606
5607         pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5608         pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5609         pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5610         pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5611
5612         pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5613                                                    pi->sys_pcie_mask,
5614                                                    pi->vbios_boot_state.pcie_gen_bootup_value,
5615                                                    clock_info->ci.ucPCIEGen);
5616         pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5617                                                      pi->vbios_boot_state.pcie_lane_bootup_value,
5618                                                      le16_to_cpu(clock_info->ci.usPCIELane));
5619
5620         if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5621                 pi->acpi_pcie_gen = pl->pcie_gen;
5622         }
5623
5624         if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5625                 pi->ulv.supported = true;
5626                 pi->ulv.pl = *pl;
5627                 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5628         }
5629
5630         /* patch up boot state */
5631         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5632                 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5633                 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5634                 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5635                 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5636         }
5637
5638         switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5639         case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5640                 pi->use_pcie_powersaving_levels = true;
5641                 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5642                         pi->pcie_gen_powersaving.max = pl->pcie_gen;
5643                 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5644                         pi->pcie_gen_powersaving.min = pl->pcie_gen;
5645                 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5646                         pi->pcie_lane_powersaving.max = pl->pcie_lane;
5647                 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5648                         pi->pcie_lane_powersaving.min = pl->pcie_lane;
5649                 break;
5650         case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5651                 pi->use_pcie_performance_levels = true;
5652                 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5653                         pi->pcie_gen_performance.max = pl->pcie_gen;
5654                 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5655                         pi->pcie_gen_performance.min = pl->pcie_gen;
5656                 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5657                         pi->pcie_lane_performance.max = pl->pcie_lane;
5658                 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5659                         pi->pcie_lane_performance.min = pl->pcie_lane;
5660                 break;
5661         default:
5662                 break;
5663         }
5664 }
5665
5666 static int ci_parse_power_table(struct amdgpu_device *adev)
5667 {
5668         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5669         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5670         union pplib_power_state *power_state;
5671         int i, j, k, non_clock_array_index, clock_array_index;
5672         union pplib_clock_info *clock_info;
5673         struct _StateArray *state_array;
5674         struct _ClockInfoArray *clock_info_array;
5675         struct _NonClockInfoArray *non_clock_info_array;
5676         union power_info *power_info;
5677         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5678         u16 data_offset;
5679         u8 frev, crev;
5680         u8 *power_state_offset;
5681         struct ci_ps *ps;
5682
5683         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5684                                    &frev, &crev, &data_offset))
5685                 return -EINVAL;
5686         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5687
5688         amdgpu_add_thermal_controller(adev);
5689
5690         state_array = (struct _StateArray *)
5691                 (mode_info->atom_context->bios + data_offset +
5692                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5693         clock_info_array = (struct _ClockInfoArray *)
5694                 (mode_info->atom_context->bios + data_offset +
5695                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5696         non_clock_info_array = (struct _NonClockInfoArray *)
5697                 (mode_info->atom_context->bios + data_offset +
5698                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5699
5700         adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5701                                   state_array->ucNumEntries, GFP_KERNEL);
5702         if (!adev->pm.dpm.ps)
5703                 return -ENOMEM;
5704         power_state_offset = (u8 *)state_array->states;
5705         for (i = 0; i < state_array->ucNumEntries; i++) {
5706                 u8 *idx;
5707                 power_state = (union pplib_power_state *)power_state_offset;
5708                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5709                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5710                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5711                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5712                 if (ps == NULL) {
5713                         kfree(adev->pm.dpm.ps);
5714                         return -ENOMEM;
5715                 }
5716                 adev->pm.dpm.ps[i].ps_priv = ps;
5717                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5718                                               non_clock_info,
5719                                               non_clock_info_array->ucEntrySize);
5720                 k = 0;
5721                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5722                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5723                         clock_array_index = idx[j];
5724                         if (clock_array_index >= clock_info_array->ucNumEntries)
5725                                 continue;
5726                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5727                                 break;
5728                         clock_info = (union pplib_clock_info *)
5729                                 ((u8 *)&clock_info_array->clockInfo[0] +
5730                                  (clock_array_index * clock_info_array->ucEntrySize));
5731                         ci_parse_pplib_clock_info(adev,
5732                                                   &adev->pm.dpm.ps[i], k,
5733                                                   clock_info);
5734                         k++;
5735                 }
5736                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5737         }
5738         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5739
5740         /* fill in the vce power states */
5741         for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
5742                 u32 sclk, mclk;
5743                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5744                 clock_info = (union pplib_clock_info *)
5745                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5746                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5747                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5748                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5749                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5750                 adev->pm.dpm.vce_states[i].sclk = sclk;
5751                 adev->pm.dpm.vce_states[i].mclk = mclk;
5752         }
5753
5754         return 0;
5755 }
5756
5757 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5758                                     struct ci_vbios_boot_state *boot_state)
5759 {
5760         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5761         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5762         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5763         u8 frev, crev;
5764         u16 data_offset;
5765
5766         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5767                                    &frev, &crev, &data_offset)) {
5768                 firmware_info =
5769                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5770                                                     data_offset);
5771                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5772                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5773                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5774                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5775                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5776                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5777                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5778
5779                 return 0;
5780         }
5781         return -EINVAL;
5782 }
5783
5784 static void ci_dpm_fini(struct amdgpu_device *adev)
5785 {
5786         int i;
5787
5788         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5789                 kfree(adev->pm.dpm.ps[i].ps_priv);
5790         }
5791         kfree(adev->pm.dpm.ps);
5792         kfree(adev->pm.dpm.priv);
5793         kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5794         amdgpu_free_extended_power_table(adev);
5795 }
5796
5797 /**
5798  * ci_dpm_init_microcode - load ucode images from disk
5799  *
5800  * @adev: amdgpu_device pointer
5801  *
5802  * Use the firmware interface to load the ucode images into
5803  * the driver (not loaded into hw).
5804  * Returns 0 on success, error on failure.
5805  */
5806 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5807 {
5808         const char *chip_name;
5809         char fw_name[30];
5810         int err;
5811
5812         DRM_DEBUG("\n");
5813
5814         switch (adev->asic_type) {
5815         case CHIP_BONAIRE:
5816                 if ((adev->pdev->revision == 0x80) ||
5817                     (adev->pdev->revision == 0x81) ||
5818                     (adev->pdev->device == 0x665f))
5819                         chip_name = "bonaire_k";
5820                 else
5821                         chip_name = "bonaire";
5822                 break;
5823         case CHIP_HAWAII:
5824                 if (adev->pdev->revision == 0x80)
5825                         chip_name = "hawaii_k";
5826                 else
5827                         chip_name = "hawaii";
5828                 break;
5829         case CHIP_KAVERI:
5830         case CHIP_KABINI:
5831         case CHIP_MULLINS:
5832         default: BUG();
5833         }
5834
5835         snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5836         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5837         if (err)
5838                 goto out;
5839         err = amdgpu_ucode_validate(adev->pm.fw);
5840
5841 out:
5842         if (err) {
5843                 pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
5844                 release_firmware(adev->pm.fw);
5845                 adev->pm.fw = NULL;
5846         }
5847         return err;
5848 }
5849
5850 static int ci_dpm_init(struct amdgpu_device *adev)
5851 {
5852         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5853         SMU7_Discrete_DpmTable *dpm_table;
5854         struct amdgpu_gpio_rec gpio;
5855         u16 data_offset, size;
5856         u8 frev, crev;
5857         struct ci_power_info *pi;
5858         int ret;
5859
5860         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5861         if (pi == NULL)
5862                 return -ENOMEM;
5863         adev->pm.dpm.priv = pi;
5864
5865         pi->sys_pcie_mask =
5866                 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5867                 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5868
5869         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5870
5871         pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5872         pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5873         pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5874         pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5875
5876         pi->pcie_lane_performance.max = 0;
5877         pi->pcie_lane_performance.min = 16;
5878         pi->pcie_lane_powersaving.max = 0;
5879         pi->pcie_lane_powersaving.min = 16;
5880
5881         ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5882         if (ret) {
5883                 ci_dpm_fini(adev);
5884                 return ret;
5885         }
5886
5887         ret = amdgpu_get_platform_caps(adev);
5888         if (ret) {
5889                 ci_dpm_fini(adev);
5890                 return ret;
5891         }
5892
5893         ret = amdgpu_parse_extended_power_table(adev);
5894         if (ret) {
5895                 ci_dpm_fini(adev);
5896                 return ret;
5897         }
5898
5899         ret = ci_parse_power_table(adev);
5900         if (ret) {
5901                 ci_dpm_fini(adev);
5902                 return ret;
5903         }
5904
5905         pi->dll_default_on = false;
5906         pi->sram_end = SMC_RAM_END;
5907
5908         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5909         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5910         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5911         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5912         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5913         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5914         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5915         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5916
5917         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5918
5919         pi->sclk_dpm_key_disabled = 0;
5920         pi->mclk_dpm_key_disabled = 0;
5921         pi->pcie_dpm_key_disabled = 0;
5922         pi->thermal_sclk_dpm_enabled = 0;
5923
5924         if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
5925                 pi->caps_sclk_ds = true;
5926         else
5927                 pi->caps_sclk_ds = false;
5928
5929         pi->mclk_strobe_mode_threshold = 40000;
5930         pi->mclk_stutter_mode_threshold = 40000;
5931         pi->mclk_edc_enable_threshold = 40000;
5932         pi->mclk_edc_wr_enable_threshold = 40000;
5933
5934         ci_initialize_powertune_defaults(adev);
5935
5936         pi->caps_fps = false;
5937
5938         pi->caps_sclk_throttle_low_notification = false;
5939
5940         pi->caps_uvd_dpm = true;
5941         pi->caps_vce_dpm = true;
5942
5943         ci_get_leakage_voltages(adev);
5944         ci_patch_dependency_tables_with_leakage(adev);
5945         ci_set_private_data_variables_based_on_pptable(adev);
5946
5947         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5948                 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5949         if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5950                 ci_dpm_fini(adev);
5951                 return -ENOMEM;
5952         }
5953         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5954         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5955         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5956         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5957         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5958         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5959         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5960         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5961         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5962
5963         adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5964         adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5965         adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5966
5967         adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5968         adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5969         adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5970         adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5971
5972         if (adev->asic_type == CHIP_HAWAII) {
5973                 pi->thermal_temp_setting.temperature_low = 94500;
5974                 pi->thermal_temp_setting.temperature_high = 95000;
5975                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5976         } else {
5977                 pi->thermal_temp_setting.temperature_low = 99500;
5978                 pi->thermal_temp_setting.temperature_high = 100000;
5979                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5980         }
5981
5982         pi->uvd_enabled = false;
5983
5984         dpm_table = &pi->smc_state_table;
5985
5986         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5987         if (gpio.valid) {
5988                 dpm_table->VRHotGpio = gpio.shift;
5989                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5990         } else {
5991                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5992                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5993         }
5994
5995         gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5996         if (gpio.valid) {
5997                 dpm_table->AcDcGpio = gpio.shift;
5998                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5999         } else {
6000                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
6001                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
6002         }
6003
6004         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
6005         if (gpio.valid) {
6006                 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
6007
6008                 switch (gpio.shift) {
6009                 case 0:
6010                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
6011                         tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6012                         break;
6013                 case 1:
6014                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
6015                         tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
6016                         break;
6017                 case 2:
6018                         tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
6019                         break;
6020                 case 3:
6021                         tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
6022                         break;
6023                 case 4:
6024                         tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
6025                         break;
6026                 default:
6027                         DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
6028                         break;
6029                 }
6030                 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
6031         }
6032
6033         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6034         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6035         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
6036         if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
6037                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6038         else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
6039                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6040
6041         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
6042                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
6043                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6044                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
6045                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6046                 else
6047                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
6048         }
6049
6050         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6051                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6052                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6053                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6054                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6055                 else
6056                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6057         }
6058
6059         pi->vddc_phase_shed_control = true;
6060
6061 #if defined(CONFIG_ACPI)
6062         pi->pcie_performance_request =
6063                 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6064 #else
6065         pi->pcie_performance_request = false;
6066 #endif
6067
6068         if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6069                                    &frev, &crev, &data_offset)) {
6070                 pi->caps_sclk_ss_support = true;
6071                 pi->caps_mclk_ss_support = true;
6072                 pi->dynamic_ss = true;
6073         } else {
6074                 pi->caps_sclk_ss_support = false;
6075                 pi->caps_mclk_ss_support = false;
6076                 pi->dynamic_ss = true;
6077         }
6078
6079         if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6080                 pi->thermal_protection = true;
6081         else
6082                 pi->thermal_protection = false;
6083
6084         pi->caps_dynamic_ac_timing = true;
6085
6086         pi->uvd_power_gated = true;
6087
6088         /* make sure dc limits are valid */
6089         if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6090             (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6091                 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6092                         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6093
6094         pi->fan_ctrl_is_in_default_mode = true;
6095
6096         return 0;
6097 }
6098
6099 static void
6100 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6101                                                struct seq_file *m)
6102 {
6103         struct ci_power_info *pi = ci_get_pi(adev);
6104         struct amdgpu_ps *rps = &pi->current_rps;
6105         u32 sclk = ci_get_average_sclk_freq(adev);
6106         u32 mclk = ci_get_average_mclk_freq(adev);
6107         u32 activity_percent = 50;
6108         int ret;
6109
6110         ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6111                                         &activity_percent);
6112
6113         if (ret == 0) {
6114                 activity_percent += 0x80;
6115                 activity_percent >>= 8;
6116                 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6117         }
6118
6119         seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
6120         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6121         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6122                    sclk, mclk);
6123         seq_printf(m, "GPU load: %u %%\n", activity_percent);
6124 }
6125
6126 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6127                                      struct amdgpu_ps *rps)
6128 {
6129         struct ci_ps *ps = ci_get_ps(rps);
6130         struct ci_pl *pl;
6131         int i;
6132
6133         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6134         amdgpu_dpm_print_cap_info(rps->caps);
6135         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6136         for (i = 0; i < ps->performance_level_count; i++) {
6137                 pl = &ps->performance_levels[i];
6138                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6139                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6140         }
6141         amdgpu_dpm_print_ps_status(adev, rps);
6142 }
6143
6144 static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
6145                                                 const struct ci_pl *ci_cpl2)
6146 {
6147         return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
6148                   (ci_cpl1->sclk == ci_cpl2->sclk) &&
6149                   (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
6150                   (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
6151 }
6152
6153 static int ci_check_state_equal(struct amdgpu_device *adev,
6154                                 struct amdgpu_ps *cps,
6155                                 struct amdgpu_ps *rps,
6156                                 bool *equal)
6157 {
6158         struct ci_ps *ci_cps;
6159         struct ci_ps *ci_rps;
6160         int i;
6161
6162         if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
6163                 return -EINVAL;
6164
6165         ci_cps = ci_get_ps(cps);
6166         ci_rps = ci_get_ps(rps);
6167
6168         if (ci_cps == NULL) {
6169                 *equal = false;
6170                 return 0;
6171         }
6172
6173         if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
6174
6175                 *equal = false;
6176                 return 0;
6177         }
6178
6179         for (i = 0; i < ci_cps->performance_level_count; i++) {
6180                 if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
6181                                         &(ci_rps->performance_levels[i]))) {
6182                         *equal = false;
6183                         return 0;
6184                 }
6185         }
6186
6187         /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
6188         *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
6189         *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
6190
6191         return 0;
6192 }
6193
6194 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6195 {
6196         struct ci_power_info *pi = ci_get_pi(adev);
6197         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6198
6199         if (low)
6200                 return requested_state->performance_levels[0].sclk;
6201         else
6202                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6203 }
6204
6205 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6206 {
6207         struct ci_power_info *pi = ci_get_pi(adev);
6208         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6209
6210         if (low)
6211                 return requested_state->performance_levels[0].mclk;
6212         else
6213                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6214 }
6215
6216 /* get temperature in millidegrees */
6217 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6218 {
6219         u32 temp;
6220         int actual_temp = 0;
6221
6222         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6223                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6224
6225         if (temp & 0x200)
6226                 actual_temp = 255;
6227         else
6228                 actual_temp = temp & 0x1ff;
6229
6230         actual_temp = actual_temp * 1000;
6231
6232         return actual_temp;
6233 }
6234
6235 static int ci_set_temperature_range(struct amdgpu_device *adev)
6236 {
6237         int ret;
6238
6239         ret = ci_thermal_enable_alert(adev, false);
6240         if (ret)
6241                 return ret;
6242         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6243                                                CISLANDS_TEMP_RANGE_MAX);
6244         if (ret)
6245                 return ret;
6246         ret = ci_thermal_enable_alert(adev, true);
6247         if (ret)
6248                 return ret;
6249         return ret;
6250 }
6251
6252 static int ci_dpm_early_init(void *handle)
6253 {
6254         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6255
6256         ci_dpm_set_dpm_funcs(adev);
6257         ci_dpm_set_irq_funcs(adev);
6258
6259         return 0;
6260 }
6261
6262 static int ci_dpm_late_init(void *handle)
6263 {
6264         int ret;
6265         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6266
6267         if (!amdgpu_dpm)
6268                 return 0;
6269
6270         /* init the sysfs and debugfs files late */
6271         ret = amdgpu_pm_sysfs_init(adev);
6272         if (ret)
6273                 return ret;
6274
6275         ret = ci_set_temperature_range(adev);
6276         if (ret)
6277                 return ret;
6278
6279         return 0;
6280 }
6281
6282 static int ci_dpm_sw_init(void *handle)
6283 {
6284         int ret;
6285         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6286
6287         ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
6288                                 &adev->pm.dpm.thermal.irq);
6289         if (ret)
6290                 return ret;
6291
6292         ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
6293                                 &adev->pm.dpm.thermal.irq);
6294         if (ret)
6295                 return ret;
6296
6297         /* default to balanced state */
6298         adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6299         adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6300         adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
6301         adev->pm.default_sclk = adev->clock.default_sclk;
6302         adev->pm.default_mclk = adev->clock.default_mclk;
6303         adev->pm.current_sclk = adev->clock.default_sclk;
6304         adev->pm.current_mclk = adev->clock.default_mclk;
6305         adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6306
6307         ret = ci_dpm_init_microcode(adev);
6308         if (ret)
6309                 return ret;
6310
6311         if (amdgpu_dpm == 0)
6312                 return 0;
6313
6314         INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6315         mutex_lock(&adev->pm.mutex);
6316         ret = ci_dpm_init(adev);
6317         if (ret)
6318                 goto dpm_failed;
6319         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6320         if (amdgpu_dpm == 1)
6321                 amdgpu_pm_print_power_states(adev);
6322         mutex_unlock(&adev->pm.mutex);
6323         DRM_INFO("amdgpu: dpm initialized\n");
6324
6325         return 0;
6326
6327 dpm_failed:
6328         ci_dpm_fini(adev);
6329         mutex_unlock(&adev->pm.mutex);
6330         DRM_ERROR("amdgpu: dpm initialization failed\n");
6331         return ret;
6332 }
6333
6334 static int ci_dpm_sw_fini(void *handle)
6335 {
6336         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6337
6338         flush_work(&adev->pm.dpm.thermal.work);
6339
6340         mutex_lock(&adev->pm.mutex);
6341         amdgpu_pm_sysfs_fini(adev);
6342         ci_dpm_fini(adev);
6343         mutex_unlock(&adev->pm.mutex);
6344
6345         release_firmware(adev->pm.fw);
6346         adev->pm.fw = NULL;
6347
6348         return 0;
6349 }
6350
6351 static int ci_dpm_hw_init(void *handle)
6352 {
6353         int ret;
6354
6355         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6356
6357         if (!amdgpu_dpm) {
6358                 ret = ci_upload_firmware(adev);
6359                 if (ret) {
6360                         DRM_ERROR("ci_upload_firmware failed\n");
6361                         return ret;
6362                 }
6363                 ci_dpm_start_smc(adev);
6364                 return 0;
6365         }
6366
6367         mutex_lock(&adev->pm.mutex);
6368         ci_dpm_setup_asic(adev);
6369         ret = ci_dpm_enable(adev);
6370         if (ret)
6371                 adev->pm.dpm_enabled = false;
6372         else
6373                 adev->pm.dpm_enabled = true;
6374         mutex_unlock(&adev->pm.mutex);
6375
6376         return ret;
6377 }
6378
6379 static int ci_dpm_hw_fini(void *handle)
6380 {
6381         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6382
6383         if (adev->pm.dpm_enabled) {
6384                 mutex_lock(&adev->pm.mutex);
6385                 ci_dpm_disable(adev);
6386                 mutex_unlock(&adev->pm.mutex);
6387         } else {
6388                 ci_dpm_stop_smc(adev);
6389         }
6390
6391         return 0;
6392 }
6393
6394 static int ci_dpm_suspend(void *handle)
6395 {
6396         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6397
6398         if (adev->pm.dpm_enabled) {
6399                 mutex_lock(&adev->pm.mutex);
6400                 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6401                                AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
6402                 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
6403                                AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
6404                 adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
6405                 adev->pm.dpm.last_state = adev->pm.dpm.state;
6406                 adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
6407                 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
6408                 mutex_unlock(&adev->pm.mutex);
6409                 amdgpu_pm_compute_clocks(adev);
6410
6411         }
6412
6413         return 0;
6414 }
6415
6416 static int ci_dpm_resume(void *handle)
6417 {
6418         int ret;
6419         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6420
6421         if (adev->pm.dpm_enabled) {
6422                 /* asic init will reset to the boot state */
6423                 mutex_lock(&adev->pm.mutex);
6424                 ci_dpm_setup_asic(adev);
6425                 ret = ci_dpm_enable(adev);
6426                 if (ret)
6427                         adev->pm.dpm_enabled = false;
6428                 else
6429                         adev->pm.dpm_enabled = true;
6430                 adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
6431                 adev->pm.dpm.state = adev->pm.dpm.last_state;
6432                 mutex_unlock(&adev->pm.mutex);
6433                 if (adev->pm.dpm_enabled)
6434                         amdgpu_pm_compute_clocks(adev);
6435         }
6436         return 0;
6437 }
6438
6439 static bool ci_dpm_is_idle(void *handle)
6440 {
6441         /* XXX */
6442         return true;
6443 }
6444
6445 static int ci_dpm_wait_for_idle(void *handle)
6446 {
6447         /* XXX */
6448         return 0;
6449 }
6450
6451 static int ci_dpm_soft_reset(void *handle)
6452 {
6453         return 0;
6454 }
6455
6456 static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
6457                                       struct amdgpu_irq_src *source,
6458                                       unsigned type,
6459                                       enum amdgpu_interrupt_state state)
6460 {
6461         u32 cg_thermal_int;
6462
6463         switch (type) {
6464         case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
6465                 switch (state) {
6466                 case AMDGPU_IRQ_STATE_DISABLE:
6467                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6468                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6469                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6470                         break;
6471                 case AMDGPU_IRQ_STATE_ENABLE:
6472                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6473                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
6474                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6475                         break;
6476                 default:
6477                         break;
6478                 }
6479                 break;
6480
6481         case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
6482                 switch (state) {
6483                 case AMDGPU_IRQ_STATE_DISABLE:
6484                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6485                         cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6486                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6487                         break;
6488                 case AMDGPU_IRQ_STATE_ENABLE:
6489                         cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
6490                         cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
6491                         WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
6492                         break;
6493                 default:
6494                         break;
6495                 }
6496                 break;
6497
6498         default:
6499                 break;
6500         }
6501         return 0;
6502 }
6503
6504 static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
6505                                     struct amdgpu_irq_src *source,
6506                                     struct amdgpu_iv_entry *entry)
6507 {
6508         bool queue_thermal = false;
6509
6510         if (entry == NULL)
6511                 return -EINVAL;
6512
6513         switch (entry->src_id) {
6514         case 230: /* thermal low to high */
6515                 DRM_DEBUG("IH: thermal low to high\n");
6516                 adev->pm.dpm.thermal.high_to_low = false;
6517                 queue_thermal = true;
6518                 break;
6519         case 231: /* thermal high to low */
6520                 DRM_DEBUG("IH: thermal high to low\n");
6521                 adev->pm.dpm.thermal.high_to_low = true;
6522                 queue_thermal = true;
6523                 break;
6524         default:
6525                 break;
6526         }
6527
6528         if (queue_thermal)
6529                 schedule_work(&adev->pm.dpm.thermal.work);
6530
6531         return 0;
6532 }
6533
6534 static int ci_dpm_set_clockgating_state(void *handle,
6535                                           enum amd_clockgating_state state)
6536 {
6537         return 0;
6538 }
6539
6540 static int ci_dpm_set_powergating_state(void *handle,
6541                                           enum amd_powergating_state state)
6542 {
6543         return 0;
6544 }
6545
6546 static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
6547                 enum pp_clock_type type, char *buf)
6548 {
6549         struct ci_power_info *pi = ci_get_pi(adev);
6550         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6551         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6552         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6553
6554         int i, now, size = 0;
6555         uint32_t clock, pcie_speed;
6556
6557         switch (type) {
6558         case PP_SCLK:
6559                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6560                 clock = RREG32(mmSMC_MSG_ARG_0);
6561
6562                 for (i = 0; i < sclk_table->count; i++) {
6563                         if (clock > sclk_table->dpm_levels[i].value)
6564                                 continue;
6565                         break;
6566                 }
6567                 now = i;
6568
6569                 for (i = 0; i < sclk_table->count; i++)
6570                         size += sprintf(buf + size, "%d: %uMhz %s\n",
6571                                         i, sclk_table->dpm_levels[i].value / 100,
6572                                         (i == now) ? "*" : "");
6573                 break;
6574         case PP_MCLK:
6575                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6576                 clock = RREG32(mmSMC_MSG_ARG_0);
6577
6578                 for (i = 0; i < mclk_table->count; i++) {
6579                         if (clock > mclk_table->dpm_levels[i].value)
6580                                 continue;
6581                         break;
6582                 }
6583                 now = i;
6584
6585                 for (i = 0; i < mclk_table->count; i++)
6586                         size += sprintf(buf + size, "%d: %uMhz %s\n",
6587                                         i, mclk_table->dpm_levels[i].value / 100,
6588                                         (i == now) ? "*" : "");
6589                 break;
6590         case PP_PCIE:
6591                 pcie_speed = ci_get_current_pcie_speed(adev);
6592                 for (i = 0; i < pcie_table->count; i++) {
6593                         if (pcie_speed != pcie_table->dpm_levels[i].value)
6594                                 continue;
6595                         break;
6596                 }
6597                 now = i;
6598
6599                 for (i = 0; i < pcie_table->count; i++)
6600                         size += sprintf(buf + size, "%d: %s %s\n", i,
6601                                         (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
6602                                         (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
6603                                         (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
6604                                         (i == now) ? "*" : "");
6605                 break;
6606         default:
6607                 break;
6608         }
6609
6610         return size;
6611 }
6612
6613 static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
6614                 enum pp_clock_type type, uint32_t mask)
6615 {
6616         struct ci_power_info *pi = ci_get_pi(adev);
6617
6618         if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
6619                                 AMD_DPM_FORCED_LEVEL_LOW |
6620                                 AMD_DPM_FORCED_LEVEL_HIGH))
6621                 return -EINVAL;
6622
6623         switch (type) {
6624         case PP_SCLK:
6625                 if (!pi->sclk_dpm_key_disabled)
6626                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6627                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
6628                                         pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6629                 break;
6630
6631         case PP_MCLK:
6632                 if (!pi->mclk_dpm_key_disabled)
6633                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6634                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
6635                                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6636                 break;
6637
6638         case PP_PCIE:
6639         {
6640                 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6641                 uint32_t level = 0;
6642
6643                 while (tmp >>= 1)
6644                         level++;
6645
6646                 if (!pi->pcie_dpm_key_disabled)
6647                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6648                                         PPSMC_MSG_PCIeDPM_ForceLevel,
6649                                         level);
6650                 break;
6651         }
6652         default:
6653                 break;
6654         }
6655
6656         return 0;
6657 }
6658
6659 static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
6660 {
6661         struct ci_power_info *pi = ci_get_pi(adev);
6662         struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6663         struct ci_single_dpm_table *golden_sclk_table =
6664                         &(pi->golden_dpm_table.sclk_table);
6665         int value;
6666
6667         value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6668                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6669                         100 /
6670                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6671
6672         return value;
6673 }
6674
6675 static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
6676 {
6677         struct ci_power_info *pi = ci_get_pi(adev);
6678         struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6679         struct ci_single_dpm_table *golden_sclk_table =
6680                         &(pi->golden_dpm_table.sclk_table);
6681
6682         if (value > 20)
6683                 value = 20;
6684
6685         ps->performance_levels[ps->performance_level_count - 1].sclk =
6686                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6687                         value / 100 +
6688                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6689
6690         return 0;
6691 }
6692
6693 static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
6694 {
6695         struct ci_power_info *pi = ci_get_pi(adev);
6696         struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6697         struct ci_single_dpm_table *golden_mclk_table =
6698                         &(pi->golden_dpm_table.mclk_table);
6699         int value;
6700
6701         value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6702                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6703                         100 /
6704                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6705
6706         return value;
6707 }
6708
6709 static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
6710 {
6711         struct ci_power_info *pi = ci_get_pi(adev);
6712         struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6713         struct ci_single_dpm_table *golden_mclk_table =
6714                         &(pi->golden_dpm_table.mclk_table);
6715
6716         if (value > 20)
6717                 value = 20;
6718
6719         ps->performance_levels[ps->performance_level_count - 1].mclk =
6720                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6721                         value / 100 +
6722                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6723
6724         return 0;
6725 }
6726
6727 static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
6728                 struct amd_pp_profile *query)
6729 {
6730         struct ci_power_info *pi = ci_get_pi(adev);
6731
6732         if (!pi || !query)
6733                 return -EINVAL;
6734
6735         if (query->type == AMD_PP_GFX_PROFILE)
6736                 memcpy(query, &pi->gfx_power_profile,
6737                                 sizeof(struct amd_pp_profile));
6738         else if (query->type == AMD_PP_COMPUTE_PROFILE)
6739                 memcpy(query, &pi->compute_power_profile,
6740                                 sizeof(struct amd_pp_profile));
6741         else
6742                 return -EINVAL;
6743
6744         return 0;
6745 }
6746
6747 static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
6748                 struct amd_pp_profile *request)
6749 {
6750         struct ci_power_info *pi = ci_get_pi(adev);
6751         struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6752         struct SMU7_Discrete_GraphicsLevel *levels =
6753                         pi->smc_state_table.GraphicsLevel;
6754         uint32_t array = pi->dpm_table_start +
6755                         offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
6756         uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
6757                         SMU7_MAX_LEVELS_GRAPHICS;
6758         uint32_t i;
6759
6760         for (i = 0; i < dpm_table->sclk_table.count; i++) {
6761                 levels[i].ActivityLevel =
6762                                 cpu_to_be16(request->activity_threshold);
6763                 levels[i].EnabledForActivity = 1;
6764                 levels[i].UpH = request->up_hyst;
6765                 levels[i].DownH = request->down_hyst;
6766         }
6767
6768         return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
6769                                 array_size, pi->sram_end);
6770 }
6771
6772 static void ci_find_min_clock_masks(struct amdgpu_device *adev,
6773                 uint32_t *sclk_mask, uint32_t *mclk_mask,
6774                 uint32_t min_sclk, uint32_t min_mclk)
6775 {
6776         struct ci_power_info *pi = ci_get_pi(adev);
6777         struct ci_dpm_table *dpm_table = &(pi->dpm_table);
6778         uint32_t i;
6779
6780         for (i = 0; i < dpm_table->sclk_table.count; i++) {
6781                 if (dpm_table->sclk_table.dpm_levels[i].enabled &&
6782                         dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
6783                         *sclk_mask |= 1 << i;
6784         }
6785
6786         for (i = 0; i < dpm_table->mclk_table.count; i++) {
6787                 if (dpm_table->mclk_table.dpm_levels[i].enabled &&
6788                         dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
6789                         *mclk_mask |= 1 << i;
6790         }
6791 }
6792
6793 static int ci_set_power_profile_state(struct amdgpu_device *adev,
6794                 struct amd_pp_profile *request)
6795 {
6796         struct ci_power_info *pi = ci_get_pi(adev);
6797         int tmp_result, result = 0;
6798         uint32_t sclk_mask = 0, mclk_mask = 0;
6799
6800         tmp_result = ci_freeze_sclk_mclk_dpm(adev);
6801         if (tmp_result) {
6802                 DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
6803                 result = tmp_result;
6804         }
6805
6806         tmp_result = ci_populate_requested_graphic_levels(adev,
6807                         request);
6808         if (tmp_result) {
6809                 DRM_ERROR("Failed to populate requested graphic levels!");
6810                 result = tmp_result;
6811         }
6812
6813         tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
6814         if (tmp_result) {
6815                 DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
6816                 result = tmp_result;
6817         }
6818
6819         ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
6820                         request->min_sclk, request->min_mclk);
6821
6822         if (sclk_mask) {
6823                 if (!pi->sclk_dpm_key_disabled)
6824                         amdgpu_ci_send_msg_to_smc_with_parameter(
6825                                 adev,
6826                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6827                                 pi->dpm_level_enable_mask.
6828                                 sclk_dpm_enable_mask &
6829                                 sclk_mask);
6830         }
6831
6832         if (mclk_mask) {
6833                 if (!pi->mclk_dpm_key_disabled)
6834                         amdgpu_ci_send_msg_to_smc_with_parameter(
6835                                 adev,
6836                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6837                                 pi->dpm_level_enable_mask.
6838                                 mclk_dpm_enable_mask &
6839                                 mclk_mask);
6840         }
6841
6842
6843         return result;
6844 }
6845
6846 static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
6847                 struct amd_pp_profile *request)
6848 {
6849         struct ci_power_info *pi = ci_get_pi(adev);
6850         int ret = -1;
6851
6852         if (!pi || !request)
6853                 return -EINVAL;
6854
6855         if (adev->pm.dpm.forced_level !=
6856                         AMD_DPM_FORCED_LEVEL_AUTO)
6857                 return -EINVAL;
6858
6859         if (request->min_sclk ||
6860                 request->min_mclk ||
6861                 request->activity_threshold ||
6862                 request->up_hyst ||
6863                 request->down_hyst) {
6864                 if (request->type == AMD_PP_GFX_PROFILE)
6865                         memcpy(&pi->gfx_power_profile, request,
6866                                         sizeof(struct amd_pp_profile));
6867                 else if (request->type == AMD_PP_COMPUTE_PROFILE)
6868                         memcpy(&pi->compute_power_profile, request,
6869                                         sizeof(struct amd_pp_profile));
6870                 else
6871                         return -EINVAL;
6872
6873                 if (request->type == pi->current_power_profile)
6874                         ret = ci_set_power_profile_state(
6875                                         adev,
6876                                         request);
6877         } else {
6878                 /* set power profile if it exists */
6879                 switch (request->type) {
6880                 case AMD_PP_GFX_PROFILE:
6881                         ret = ci_set_power_profile_state(
6882                                 adev,
6883                                 &pi->gfx_power_profile);
6884                         break;
6885                 case AMD_PP_COMPUTE_PROFILE:
6886                         ret = ci_set_power_profile_state(
6887                                 adev,
6888                                 &pi->compute_power_profile);
6889                         break;
6890                 default:
6891                         return -EINVAL;
6892                 }
6893         }
6894
6895         if (!ret)
6896                 pi->current_power_profile = request->type;
6897
6898         return 0;
6899 }
6900
6901 static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
6902                 struct amd_pp_profile *request)
6903 {
6904         struct ci_power_info *pi = ci_get_pi(adev);
6905
6906         if (!pi || !request)
6907                 return -EINVAL;
6908
6909         if (request->type == AMD_PP_GFX_PROFILE) {
6910                 pi->gfx_power_profile = pi->default_gfx_power_profile;
6911                 return ci_dpm_set_power_profile_state(adev,
6912                                           &pi->gfx_power_profile);
6913         } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
6914                 pi->compute_power_profile =
6915                         pi->default_compute_power_profile;
6916                 return ci_dpm_set_power_profile_state(adev,
6917                                           &pi->compute_power_profile);
6918         } else
6919                 return -EINVAL;
6920 }
6921
6922 static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
6923                 enum amd_pp_profile_type type)
6924 {
6925         struct ci_power_info *pi = ci_get_pi(adev);
6926         struct amd_pp_profile request = {0};
6927
6928         if (!pi)
6929                 return -EINVAL;
6930
6931         if (pi->current_power_profile != type) {
6932                 request.type = type;
6933                 return ci_dpm_set_power_profile_state(adev, &request);
6934         }
6935
6936         return 0;
6937 }
6938
6939 static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
6940                               void *value, int *size)
6941 {
6942         u32 activity_percent = 50;
6943         int ret;
6944
6945         /* size must be at least 4 bytes for all sensors */
6946         if (*size < 4)
6947                 return -EINVAL;
6948
6949         switch (idx) {
6950         case AMDGPU_PP_SENSOR_GFX_SCLK:
6951                 *((uint32_t *)value) = ci_get_average_sclk_freq(adev);
6952                 *size = 4;
6953                 return 0;
6954         case AMDGPU_PP_SENSOR_GFX_MCLK:
6955                 *((uint32_t *)value) = ci_get_average_mclk_freq(adev);
6956                 *size = 4;
6957                 return 0;
6958         case AMDGPU_PP_SENSOR_GPU_TEMP:
6959                 *((uint32_t *)value) = ci_dpm_get_temp(adev);
6960                 *size = 4;
6961                 return 0;
6962         case AMDGPU_PP_SENSOR_GPU_LOAD:
6963                 ret = ci_read_smc_soft_register(adev,
6964                                                 offsetof(SMU7_SoftRegisters,
6965                                                          AverageGraphicsA),
6966                                                 &activity_percent);
6967                 if (ret == 0) {
6968                         activity_percent += 0x80;
6969                         activity_percent >>= 8;
6970                         activity_percent =
6971                                 activity_percent > 100 ? 100 : activity_percent;
6972                 }
6973                 *((uint32_t *)value) = activity_percent;
6974                 *size = 4;
6975                 return 0;
6976         default:
6977                 return -EINVAL;
6978         }
6979 }
6980
6981 const struct amd_ip_funcs ci_dpm_ip_funcs = {
6982         .name = "ci_dpm",
6983         .early_init = ci_dpm_early_init,
6984         .late_init = ci_dpm_late_init,
6985         .sw_init = ci_dpm_sw_init,
6986         .sw_fini = ci_dpm_sw_fini,
6987         .hw_init = ci_dpm_hw_init,
6988         .hw_fini = ci_dpm_hw_fini,
6989         .suspend = ci_dpm_suspend,
6990         .resume = ci_dpm_resume,
6991         .is_idle = ci_dpm_is_idle,
6992         .wait_for_idle = ci_dpm_wait_for_idle,
6993         .soft_reset = ci_dpm_soft_reset,
6994         .set_clockgating_state = ci_dpm_set_clockgating_state,
6995         .set_powergating_state = ci_dpm_set_powergating_state,
6996 };
6997
6998 static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
6999         .get_temperature = &ci_dpm_get_temp,
7000         .pre_set_power_state = &ci_dpm_pre_set_power_state,
7001         .set_power_state = &ci_dpm_set_power_state,
7002         .post_set_power_state = &ci_dpm_post_set_power_state,
7003         .display_configuration_changed = &ci_dpm_display_configuration_changed,
7004         .get_sclk = &ci_dpm_get_sclk,
7005         .get_mclk = &ci_dpm_get_mclk,
7006         .print_power_state = &ci_dpm_print_power_state,
7007         .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
7008         .force_performance_level = &ci_dpm_force_performance_level,
7009         .vblank_too_short = &ci_dpm_vblank_too_short,
7010         .powergate_uvd = &ci_dpm_powergate_uvd,
7011         .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
7012         .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
7013         .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
7014         .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
7015         .print_clock_levels = ci_dpm_print_clock_levels,
7016         .force_clock_level = ci_dpm_force_clock_level,
7017         .get_sclk_od = ci_dpm_get_sclk_od,
7018         .set_sclk_od = ci_dpm_set_sclk_od,
7019         .get_mclk_od = ci_dpm_get_mclk_od,
7020         .set_mclk_od = ci_dpm_set_mclk_od,
7021         .check_state_equal = ci_check_state_equal,
7022         .get_vce_clock_state = amdgpu_get_vce_clock_state,
7023         .get_power_profile_state = ci_dpm_get_power_profile_state,
7024         .set_power_profile_state = ci_dpm_set_power_profile_state,
7025         .reset_power_profile_state = ci_dpm_reset_power_profile_state,
7026         .switch_power_profile = ci_dpm_switch_power_profile,
7027         .read_sensor = ci_dpm_read_sensor,
7028 };
7029
7030 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
7031 {
7032         if (adev->pm.funcs == NULL)
7033                 adev->pm.funcs = &ci_dpm_funcs;
7034 }
7035
7036 static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
7037         .set = ci_dpm_set_interrupt_state,
7038         .process = ci_dpm_process_interrupt,
7039 };
7040
7041 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
7042 {
7043         adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
7044         adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
7045 }