/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "vid.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v10_0.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_10_0_enum.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "ivsrcid/ivsrcid_vislands30.h"

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[] =
{
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET,
        CRTC6_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
        HPD0_REGISTER_OFFSET,
        HPD1_REGISTER_OFFSET,
        HPD2_REGISTER_OFFSET,
        HPD3_REGISTER_OFFSET,
        HPD4_REGISTER_OFFSET,
        HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
        DIG0_REGISTER_OFFSET,
        DIG1_REGISTER_OFFSET,
        DIG2_REGISTER_OFFSET,
        DIG3_REGISTER_OFFSET,
        DIG4_REGISTER_OFFSET,
        DIG5_REGISTER_OFFSET,
        DIG6_REGISTER_OFFSET
};

static const struct {
        uint32_t        reg;
        uint32_t        vblank;
        uint32_t        vline;
        uint32_t        hpd;

} interrupt_status_offsets[] = { {
        .reg = mmDISP_INTERRUPT_STATUS,
        .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const u32 golden_settings_tonga_a11[] =
{
        mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
        mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
        mmFBC_MISC, 0x1f311fff, 0x12300000,
        mmHDMI_CONTROL, 0x31000111, 0x00000011,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static const u32 golden_settings_fiji_a10[] =
{
        mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
        mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
        mmFBC_MISC, 0x1f311fff, 0x12300000,
        mmHDMI_CONTROL, 0x31000111, 0x00000011,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_FIJI:
                amdgpu_device_program_register_sequence(adev,
                                                        fiji_mgcg_cgcg_init,
                                                        ARRAY_SIZE(fiji_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_fiji_a10,
                                                        ARRAY_SIZE(golden_settings_fiji_a10));
                break;
        case CHIP_TONGA:
                amdgpu_device_program_register_sequence(adev,
                                                        tonga_mgcg_cgcg_init,
                                                        ARRAY_SIZE(tonga_mgcg_cgcg_init));
                amdgpu_device_program_register_sequence(adev,
                                                        golden_settings_tonga_a11,
                                                        ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        default:
                break;
        }
}

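/*
 * The AZALIA audio endpoint registers are accessed indirectly through
 * an index/data register pair; the endpoint index lock serializes
 * access so concurrent readers and writers don't clobber the index.
 */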
static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
                                     u32 block_offset, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

        return r;
}

static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
                                      u32 block_offset, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

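/**
 * dce_v10_0_vblank_get_counter - get the frame count
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to get the frame count from
 *
 * Returns the CRTC frame counter, or 0 if @crtc is out of range.
 */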
static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

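/*
 * Pageflip interrupts are reference counted per CRTC through
 * amdgpu_irq_get()/amdgpu_irq_put(); these helpers grab or drop one
 * reference for every display controller.
 */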
static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
        unsigned i;

        /* Enable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
        unsigned i;

        /* Disable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v10_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
                                int crtc_id, u64 crtc_base, bool async)
{
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
        u32 tmp;

        /* flip at hsync for async, default is vsync */
        tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
                            GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* update the primary scanout address */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(crtc_base));
        /* writing to the low address triggers the update */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(crtc_base));
        /* post the write */
        RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

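/**
 * dce_v10_0_crtc_get_scanoutpos - get the scanout position
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to query
 * @vbl: vblank start/end positions (out)
 * @position: current scanout position (out)
 *
 * Reads the vblank start/end and current scanout position registers
 * for the given crtc. Returns 0 on success, -EINVAL if @crtc is out
 * of range.
 */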
static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                        u32 *vbl, u32 *position)
{
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
        *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

        return 0;
}

/**
 * dce_v10_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
                               enum amdgpu_hpd_id hpd)
{
        bool connected = false;

        if (hpd >= adev->mode_info.num_hpd)
                return connected;

        if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
            DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
                connected = true;

        return connected;
}

/**
 * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
                                      enum amdgpu_hpd_id hpd)
{
        u32 tmp;
        bool connected = dce_v10_0_hpd_sense(adev, hpd);

        if (hpd >= adev->mode_info.num_hpd)
                return;

        tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
        if (connected)
                tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
        else
                tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
        WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v10_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        u32 tmp;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
                        /* don't try to enable hpd on eDP or LVDS; this avoids
                         * breaking the aux dp channel on iMacs and helps
                         * (but does not completely fix)
                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
                         * It also avoids interrupt storms during dpms.
                         */
                        tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                        tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
                        WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
                        continue;
                }

                tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
                WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
                                    DC_HPD_CONNECT_INT_DELAY,
                                    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
                tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
                                    DC_HPD_DISCONNECT_INT_DELAY,
                                    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
                WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq,
                               amdgpu_connector->hpd.hpd);
        }
}

/**
 * dce_v10_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        u32 tmp;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
                WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                amdgpu_irq_put(adev, &adev->hpd_irq,
                               amdgpu_connector->hpd.hpd);
        }
}

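/* Report the GPIO register used for the HPD (hotplug detect) pins. */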
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
        return mmDC_GPIO_HPD_A;
}

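/**
 * dce_v10_0_is_display_hung - check if the display engine appears hung
 *
 * @adev: amdgpu_device pointer
 *
 * Samples the HV counter of every enabled CRTC, then polls up to ten
 * times at 100us intervals; a CRTC whose counter never advances is
 * considered hung. Returns true if at least one enabled CRTC is hung,
 * false otherwise.
 */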
static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
{
        u32 crtc_hung = 0;
        u32 crtc_status[6];
        u32 i, j, tmp;

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
                        crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                        crtc_hung |= (1 << i);
                }
        }

        for (j = 0; j < 10; j++) {
                for (i = 0; i < adev->mode_info.num_crtc; i++) {
                        if (crtc_hung & (1 << i)) {
                                tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                                if (tmp != crtc_status[i])
                                        crtc_hung &= ~(1 << i);
                        }
                }
                if (crtc_hung == 0)
                        return false;
                udelay(100);
        }

        return true;
}

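/**
 * dce_v10_0_set_vga_render_state - enable or disable VGA rendering
 *
 * @adev: amdgpu_device pointer
 * @render: true to enable VGA rendering, false to disable it
 *
 * When disabling, memory access through the VGA aperture is locked
 * out and the VGA render path is turned off; when enabling, both are
 * restored.
 */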
static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
                                           bool render)
{
        u32 tmp;

        /* Lockout access through VGA aperture */
        tmp = RREG32(mmVGA_HDP_CONTROL);
        if (render)
                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
        else
                tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
        WREG32(mmVGA_HDP_CONTROL, tmp);

        /* disable VGA render */
        tmp = RREG32(mmVGA_RENDER_CONTROL);
        if (render)
                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
        else
                tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
        WREG32(mmVGA_RENDER_CONTROL, tmp);
}

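/* Report the number of display controllers (CRTCs) for this ASIC. */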
static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
{
        int num_crtc = 0;

        switch (adev->asic_type) {
        case CHIP_FIJI:
        case CHIP_TONGA:
                num_crtc = 6;
                break;
        default:
                num_crtc = 0;
        }
        return num_crtc;
}

void dce_v10_0_disable_dce(struct amdgpu_device *adev)
{
        /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
        if (amdgpu_atombios_has_dce_engine_info(adev)) {
                u32 tmp;
                int crtc_enabled, i;

                dce_v10_0_set_vga_render_state(adev, false);

                /* Disable the CRTCs */
                for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
                        crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                                     CRTC_CONTROL, CRTC_MASTER_EN);
                        if (crtc_enabled) {
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                                tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
                                WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                }
        }
}

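/**
 * dce_v10_0_program_fmt - set up the FMT block for an encoder
 *
 * @encoder: the drm encoder to program
 *
 * Programs FMT_BIT_DEPTH_CONTROL with either truncation or spatial
 * dithering to match the monitor's bit depth (6, 8 or 10 bpc).
 * LVDS/eDP is configured by the atom tables, and analog encoders
 * need no FMT setup.
 */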
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        int bpc = 0;
        u32 tmp = 0;
        enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
                bpc = amdgpu_connector_get_monitor_bpc(connector);
                dither = amdgpu_connector->dither;
        }

        /* LVDS/eDP FMT is set up by atom */
        if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
                return;

        /* not needed for analog */
        if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
            (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
                return;

        if (bpc == 0)
                return;

        switch (bpc) {
        case 6:
                if (dither == AMDGPU_FMT_DITHER_ENABLE) {
                        /* XXX sort out optimal dither settings */
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
                } else {
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
                }
                break;
        case 8:
                if (dither == AMDGPU_FMT_DITHER_ENABLE) {
                        /* XXX sort out optimal dither settings */
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
                } else {
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
                }
                break;
        case 10:
                if (dither == AMDGPU_FMT_DITHER_ENABLE) {
                        /* XXX sort out optimal dither settings */
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
                } else {
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
                }
                break;
        default:
                /* not needed */
                break;
        }

        WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v10_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
                                       struct amdgpu_crtc *amdgpu_crtc,
                                       struct drm_display_mode *mode)
{
        u32 tmp, buffer_alloc, i, mem_cfg;
        u32 pipe_offset = amdgpu_crtc->crtc_id;
        /*
         * Line Buffer Setup
         * There are 6 line buffers, one for each display controller.
         * There are 3 partitions per LB. Select the number of partitions
         * to enable based on the display width.  For display widths larger
         * than 4096, you need to use 2 display controllers and combine
         * them using the stereo blender.
         */
        if (amdgpu_crtc->base.enabled && mode) {
                if (mode->crtc_hdisplay < 1920) {
                        mem_cfg = 1;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 2560) {
                        mem_cfg = 2;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 4096) {
                        mem_cfg = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                } else {
                        DRM_DEBUG_KMS("Mode too big for LB!\n");
                        mem_cfg = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                }
        } else {
                mem_cfg = 1;
                buffer_alloc = 0;
        }

        tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
        WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
        tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
        WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
                if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
                        break;
                udelay(1);
        }

        if (amdgpu_crtc->base.enabled && mode) {
                switch (mem_cfg) {
                case 0:
                default:
                        return 4096 * 2;
                case 1:
                        return 1920 * 2;
                case 2:
                        return 2560 * 2;
                }
        }

        /* controller not enabled, so no lb used */
        return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
        u32 tmp = RREG32(mmMC_SHARED_CHMAP);

        switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
        case 0:
        default:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        case 3:
                return 8;
        case 4:
                return 3;
        case 5:
                return 6;
        case 6:
                return 10;
        case 7:
                return 12;
        case 8:
                return 16;
        }
}

struct dce10_wm_params {
        u32 dram_channels; /* number of dram channels */
        u32 yclk;          /* bandwidth per dram data pin in kHz */
        u32 sclk;          /* engine clock in kHz */
        u32 disp_clk;      /* display clock in kHz */
        u32 src_width;     /* viewport width */
        u32 active_time;   /* active display time in ns */
        u32 blank_time;    /* blank time in ns */
        bool interlaced;   /* mode is interlaced */
        fixed20_12 vsc;    /* vertical scale ratio */
        u32 num_heads;     /* number of active crtcs */
        u32 bytes_per_pixel; /* bytes per pixel display + overlay */
        u32 lb_size;       /* line buffer allocated to pipe */
        u32 vtaps;         /* vertical scaler taps */
};

/**
 * dce_v10_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
{
        /* Calculate raw DRAM Bandwidth */
        fixed20_12 dram_efficiency; /* 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        dram_efficiency.full = dfixed_const(7);
        dram_efficiency.full = dfixed_div(dram_efficiency, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
        /* Calculate DRAM Bandwidth and the part allocated to display. */
        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
{
        /* Calculate the display Data return Bandwidth */
        fixed20_12 return_efficiency; /* 0.8 */
        fixed20_12 sclk, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        sclk.full = dfixed_const(wm->sclk);
        sclk.full = dfixed_div(sclk, a);
        a.full = dfixed_const(10);
        return_efficiency.full = dfixed_const(8);
        return_efficiency.full = dfixed_div(return_efficiency, a);
        a.full = dfixed_const(32);
        bandwidth.full = dfixed_mul(a, sclk);
        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
        /* Calculate the DMIF Request Bandwidth */
        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
        fixed20_12 disp_clk, bandwidth;
        fixed20_12 a, b;

        a.full = dfixed_const(1000);
        disp_clk.full = dfixed_const(wm->disp_clk);
        disp_clk.full = dfixed_div(disp_clk, a);
        a.full = dfixed_const(32);
        b.full = dfixed_mul(a, disp_clk);

        a.full = dfixed_const(10);
        disp_clk_request_efficiency.full = dfixed_const(8);
        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

        bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
{
        /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
        u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
        u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
        u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);

        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v10_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
{
        /* Calculate the display mode Average Bandwidth
         * DisplayMode should contain the source and destination dimensions,
         * timing, etc.
         */
        fixed20_12 bpp;
        fixed20_12 line_time;
        fixed20_12 src_width;
        fixed20_12 bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
        line_time.full = dfixed_div(line_time, a);
        bpp.full = dfixed_const(wm->bytes_per_pixel);
        src_width.full = dfixed_const(wm->src_width);
        bandwidth.full = dfixed_mul(src_width, bpp);
        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
        bandwidth.full = dfixed_div(bandwidth, line_time);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
{
        /* First calculate the latency in ns */
        u32 mc_latency = 2000; /* 2000 ns. */
        u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
        u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
        u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
        u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
        u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
                (wm->num_heads * cursor_line_pair_return_time);
        u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
        u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
        u32 tmp, dmif_size = 12288;
        fixed20_12 a, b, c;

        if (wm->num_heads == 0)
                return 0;

        a.full = dfixed_const(2);
        b.full = dfixed_const(1);
        if ((wm->vsc.full > a.full) ||
            ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
            (wm->vtaps >= 5) ||
            ((wm->vsc.full >= a.full) && wm->interlaced))
                max_src_lines_per_dst_line = 4;
        else
                max_src_lines_per_dst_line = 2;

        a.full = dfixed_const(available_bandwidth);
        b.full = dfixed_const(wm->num_heads);
        a.full = dfixed_div(a, b);
        tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
        tmp = min(dfixed_trunc(a), tmp);

        lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

        a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
        b.full = dfixed_const(1000);
        c.full = dfixed_const(lb_fill_bw);
        b.full = dfixed_div(c, b);
        a.full = dfixed_div(a, b);
        line_fill_time = dfixed_trunc(a);

        if (line_fill_time < wm->active_time)
                return latency;
        else
                return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
        if (dce_v10_0_average_bandwidth(wm) <=
            (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
{
        if (dce_v10_0_average_bandwidth(wm) <=
            (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v10_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
{
        u32 lb_partitions = wm->lb_size / wm->src_width;
        u32 line_time = wm->active_time + wm->blank_time;
        u32 latency_tolerant_lines;
        u32 latency_hiding;
        fixed20_12 a;

        a.full = dfixed_const(1);
        if (wm->vsc.full > a.full)
                latency_tolerant_lines = 1;
        else {
                if (lb_partitions <= (wm->vtaps + 1))
                        latency_tolerant_lines = 1;
                else
                        latency_tolerant_lines = 2;
        }

        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

        if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
                return true;
        else
                return false;
}

/**
 * dce_v10_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
                                        struct amdgpu_crtc *amdgpu_crtc,
                                        u32 lb_size, u32 num_heads)
{
        struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
        struct dce10_wm_params wm_low, wm_high;
        u32 active_time;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
        u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

        if (amdgpu_crtc->base.enabled && num_heads && mode) {
                active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
                                            (u32)mode->clock);
                line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
                                          (u32)mode->clock);
                line_time = min(line_time, (u32)65535);

                /* watermark for high clocks */
                if (adev->pm.dpm_enabled) {
                        wm_high.yclk =
                                amdgpu_dpm_get_mclk(adev, false) * 10;
                        wm_high.sclk =
                                amdgpu_dpm_get_sclk(adev, false) * 10;
                } else {
                        wm_high.yclk = adev->pm.current_mclk * 10;
                        wm_high.sclk = adev->pm.current_sclk * 10;
                }

                wm_high.disp_clk = mode->clock;
                wm_high.src_width = mode->crtc_hdisplay;
                wm_high.active_time = active_time;
                wm_high.blank_time = line_time - wm_high.active_time;
                wm_high.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm_high.interlaced = true;
                wm_high.vsc = amdgpu_crtc->vsc;
                wm_high.vtaps = 1;
                if (amdgpu_crtc->rmx_type != RMX_OFF)
                        wm_high.vtaps = 2;
                wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm_high.lb_size = lb_size;
                wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
                wm_high.num_heads = num_heads;

                /* set for high clocks */
                latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);

                /* possibly force display priority to high */
                /* should really do this at mode validation time... */
                if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
                    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
                    !dce_v10_0_check_latency_hiding(&wm_high) ||
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }

                /* watermark for low clocks */
                if (adev->pm.dpm_enabled) {
                        wm_low.yclk =
                                amdgpu_dpm_get_mclk(adev, true) * 10;
                        wm_low.sclk =
                                amdgpu_dpm_get_sclk(adev, true) * 10;
                } else {
                        wm_low.yclk = adev->pm.current_mclk * 10;
                        wm_low.sclk = adev->pm.current_sclk * 10;
                }

                wm_low.disp_clk = mode->clock;
                wm_low.src_width = mode->crtc_hdisplay;
                wm_low.active_time = active_time;
                wm_low.blank_time = line_time - wm_low.active_time;
                wm_low.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm_low.interlaced = true;
                wm_low.vsc = amdgpu_crtc->vsc;
                wm_low.vtaps = 1;
                if (amdgpu_crtc->rmx_type != RMX_OFF)
                        wm_low.vtaps = 2;
                wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm_low.lb_size = lb_size;
                wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
                wm_low.num_heads = num_heads;

                /* set for low clocks */
                latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);

                /* possibly force display priority to high */
                /* should really do this at mode validation time... */
                if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
                    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
                    !dce_v10_0_check_latency_hiding(&wm_low) ||
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                }
                lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }

        /* select wm A */
        wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
        tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* select wm B */
        tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
        tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* restore original selection */
        WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

        /* save values for DPM */
        amdgpu_crtc->line_time = line_time;
        amdgpu_crtc->wm_high = latency_watermark_a;
        amdgpu_crtc->wm_low = latency_watermark_b;
        /* Save number of lines the linebuffer leads before the scanout */
        amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v10_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
{
        struct drm_display_mode *mode = NULL;
        u32 num_heads = 0, lb_size;
        int i;

        amdgpu_display_update_priority(adev);

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (adev->mode_info.crtcs[i]->base.enabled)
                        num_heads++;
        }
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                mode = &adev->mode_info.crtcs[i]->base.mode;
                lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
                dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
                                            lb_size, num_heads);
        }
}

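/**
 * dce_v10_0_audio_get_connected_pins - refresh audio pin connectivity
 *
 * @adev: amdgpu_device pointer
 *
 * Reads each pin's default configuration register and marks the pin
 * as connected unless the port connectivity field reports "no
 * physical connection" (value 1).
 */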
static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
        int i;
        u32 offset, tmp;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                offset = adev->mode_info.audio.pin[i].offset;
                tmp = RREG32_AUDIO_ENDPT(offset,
                                         ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
                if (((tmp &
                AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
                AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
                        adev->mode_info.audio.pin[i].connected = false;
                else
                        adev->mode_info.audio.pin[i].connected = true;
        }
}

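/* Return the first connected audio pin, or NULL if none is connected. */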
static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
{
        int i;

        dce_v10_0_audio_get_connected_pins(adev);

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                if (adev->mode_info.audio.pin[i].connected)
                        return &adev->mode_info.audio.pin[i];
        }
        DRM_ERROR("No connected audio pins found!\n");
        return NULL;
}

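/**
 * dce_v10_0_afmt_audio_select_pin - route an encoder to its audio pin
 *
 * @encoder: the drm encoder
 *
 * Points the encoder's AFMT audio source select at the codec pin
 * assigned to this dig block.
 */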
static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
        WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
}

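/**
 * dce_v10_0_audio_write_latency_fields - program audio/video lipsync
 *
 * @encoder: the drm encoder
 * @mode: the current display mode
 *
 * Writes the connector's reported video and audio latency (interlaced
 * or progressive, depending on @mode) into the pin's LIPSYNC register,
 * or zero when the connector reports no latency data.
 */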
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
                                                struct drm_display_mode *mode)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        u32 tmp;
        int interlace = 0;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                interlace = 1;
        if (connector->latency_present[interlace]) {
                tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    VIDEO_LIPSYNC, connector->video_latency[interlace]);
                tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
        } else {
                tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    VIDEO_LIPSYNC, 0);
                tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    AUDIO_LIPSYNC, 0);
        }
        WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                           ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

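/**
 * dce_v10_0_audio_write_speaker_allocation - program speaker allocation
 *
 * @encoder: the drm encoder
 *
 * Fetches the Speaker Allocation Data Block from the connector's EDID
 * and writes it to the pin, falling back to stereo when the block is
 * missing; the pin is flagged as an HDMI (not DP) connection.
 */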
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        u32 tmp;
        u8 *sadb = NULL;
        int sad_count;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
        if (sad_count < 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
                sad_count = 0;
        }

        /* program the speaker allocation */
        tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                                 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                            DP_CONNECTION, 0);
        /* set HDMI mode */
        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                            HDMI_CONNECTION, 1);
        if (sad_count)
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                                    SPEAKER_ALLOCATION, sadb[0]);
        else
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                                    SPEAKER_ALLOCATION, 5); /* stereo */
        WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                           ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

        kfree(sadb);
}

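/**
 * dce_v10_0_audio_write_sad_regs - program the short audio descriptors
 *
 * @encoder: the drm encoder
 *
 * Parses the CEA SADs from the connector's EDID and, for every audio
 * format that has a hardware descriptor register, writes the highest
 * supported channel count and the supported frequencies.
 */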
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        struct cea_sad *sads;
        int i, sad_count;

        static const u16 eld_reg_to_type[][2] = {
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
        if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }
        BUG_ON(!sads);

        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 tmp = 0;
                u8 stereo_freqs = 0;
                int max_channels = -1;
                int j;

                for (j = 0; j < sad_count; j++) {
                        struct cea_sad *sad = &sads[j];

                        if (sad->format == eld_reg_to_type[i][1]) {
                                if (sad->channels > max_channels) {
                                        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                                            MAX_CHANNELS, sad->channels);
                                        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                                            DESCRIPTOR_BYTE_2, sad->byte2);
                                        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                                            SUPPORTED_FREQUENCIES, sad->freq);
                                        max_channels = sad->channels;
                                }

                                if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
                                        stereo_freqs |= sad->freq;
                                else
                                        break;
                        }
                }

                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
                WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
        }

        kfree(sads);
}

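/*
 * enable or disable audio output on the given pin via its hot plug control
 */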
static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
                                  struct amdgpu_audio_pin *pin,
                                  bool enable)
{
        if (!pin)
                return;

        WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
                           enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[] =
{
        AUD0_REGISTER_OFFSET,
        AUD1_REGISTER_OFFSET,
        AUD2_REGISTER_OFFSET,
        AUD3_REGISTER_OFFSET,
        AUD4_REGISTER_OFFSET,
        AUD5_REGISTER_OFFSET,
        AUD6_REGISTER_OFFSET,
};

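/*
 * set up the audio pins; audio is left disabled here and enabled later
 * when a mode is set
 */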
static int dce_v10_0_audio_init(struct amdgpu_device *adev)
{
        int i;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = 7;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].offset = pin_offsets[i];
                adev->mode_info.audio.pin[i].id = i;
                /* disable audio.  it will be set up later */
                /* XXX remove once we switch to ip funcs */
                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        return 0;
}

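/*
 * disable all audio pins and mark audio as disabled
 */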
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
        int i;

        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
                dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

        adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
        WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
        tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
        WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
        WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
        tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
        WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
        WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
        tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
        WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
                                               void *buffer, size_t size)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        uint8_t *frame = buffer + 3;
        uint8_t *header = buffer;

        WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
                frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
        WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
                frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
        WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
                frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
        WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
                frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

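/*
 * program the audio DTO source and phase/modulo for the given pixel clock
 */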
static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        u32 dto_phase = 24 * 1000;
        u32 dto_modulo = clock;
        u32 tmp;

        if (!dig || !dig->afmt)
                return;

        /* XXX two dtos; generally use dto0 for hdmi */
        /* Express [24MHz / target pixel clock] as an exact rational
         * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
         * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
         */
        tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
        tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
                            amdgpu_crtc->crtc_id);
        WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
        WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
        WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
                                  struct drm_display_mode *mode)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        struct hdmi_avi_infoframe frame;
        ssize_t err;
        u32 tmp;
        int bpc = 8;

        if (!dig || !dig->afmt)
                return;

        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (!dig->afmt->enabled)
                return;

        /* hdmi deep color mode general control packets setup, if bpc > 8 */
        if (encoder->crtc) {
                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
                bpc = amdgpu_crtc->bpc;
        }

        /* disable audio prior to setting up hw */
        dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
        dce_v10_0_audio_enable(adev, dig->afmt->pin, false);

        dce_v10_0_audio_set_dto(encoder, mode->clock);

        tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
        WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

        WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

        tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
        switch (bpc) {
        case 0:
        case 6:
        case 8:
        case 16:
        default:
                tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
                tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
                DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
                          connector->name, bpc);
                break;
        case 10:
                tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
                tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
                DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
                          connector->name);
                break;
        case 12:
                tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
                tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
                DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
                          connector->name);
                break;
        }
        WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
        WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
        /* enable audio info frames (frames won't be set until audio is enabled) */
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
        /* required for audio info values to be updated */
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
        WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
        /* required for audio info values to be updated */
        tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
        WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
        /* anything other than 0 */
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
        WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

        WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

        tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
        /* set the default audio delay */
        tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
        /* should be sufficient for all audio modes and small enough for all hblanks */
        tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
        WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
        /* allow 60958 channel status fields to be updated */
        tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
        WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
        if (bpc > 8)
                /* clear SW CTS value */
                tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
        else
                /* select SW CTS value */
                tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
        /* allow hw to send ACR packets when required */
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
        WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

        dce_v10_0_afmt_update_ACR(encoder, mode->clock);

        tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
        WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
        WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
        WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

        dce_v10_0_audio_write_speaker_allocation(encoder);

        WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
               (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

        dce_v10_0_afmt_audio_select_pin(encoder);
        dce_v10_0_audio_write_sad_regs(encoder);
        dce_v10_0_audio_write_latency_fields(encoder, mode);

        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
                return;
        }

        err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (err < 0) {
                DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
                return;
        }

        dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

        tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
        /* enable AVI info frames */
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
        /* required for audio info values to be updated */
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
        WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
        WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
        /* send audio packets */
        tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
        WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

        WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
        WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
        WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
        WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);

        /* enable audio after setting up hw */
        dce_v10_0_audio_enable(adev, dig->afmt->pin, true);
}

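/*
 * enable or disable the AFMT block for the encoder; tears down the
 * audio pin on disable
 */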
static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

        if (!dig || !dig->afmt)
                return;

        /* Silent, r600_hdmi_enable will raise WARN for us */
        if (enable && dig->afmt->enabled)
                return;
        if (!enable && !dig->afmt->enabled)
                return;

        if (!enable && dig->afmt->pin) {
                dce_v10_0_audio_enable(adev, dig->afmt->pin, false);
                dig->afmt->pin = NULL;
        }

        dig->afmt->enabled = enable;

        DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
                  enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}

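/*
 * allocate the AFMT structs, one per DIG; on allocation failure, free
 * everything allocated so far and return -ENOMEM
 */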
static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->mode_info.num_dig; i++)
                adev->mode_info.afmt[i] = NULL;

        /* DCE10 has audio blocks tied to DIG encoders */
        for (i = 0; i < adev->mode_info.num_dig; i++) {
                adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
                if (adev->mode_info.afmt[i]) {
                        adev->mode_info.afmt[i]->offset = dig_offsets[i];
                        adev->mode_info.afmt[i]->id = i;
                } else {
                        int j;
                        for (j = 0; j < i; j++) {
                                kfree(adev->mode_info.afmt[j]);
                                adev->mode_info.afmt[j] = NULL;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}

static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->mode_info.num_dig; i++) {
                kfree(adev->mode_info.afmt[i]);
                adev->mode_info.afmt[i] = NULL;
        }
}

static const u32 vga_control_regs[6] =
{
        mmD1VGA_CONTROL,
        mmD2VGA_CONTROL,
        mmD3VGA_CONTROL,
        mmD4VGA_CONTROL,
        mmD5VGA_CONTROL,
        mmD6VGA_CONTROL,
};

static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        u32 vga_control;

        vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
        if (enable)
                WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
        else
                WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
}

static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;

        if (enable)
                WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
        else
                WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
}

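/*
 * program the GRPH registers for the new framebuffer: pin the BO (unless
 * atomic), translate the drm format and tiling flags into GRPH_CONTROL,
 * set the surface addresses, pitch and viewport, then unpin the old fb
 */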
static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
                                     struct drm_framebuffer *fb,
                                     int x, int y, int atomic)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct drm_framebuffer *target_fb;
        struct drm_gem_object *obj;
        struct amdgpu_bo *abo;
        uint64_t fb_location, tiling_flags;
        uint32_t fb_format, fb_pitch_pixels;
        u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
        u32 pipe_config;
        u32 tmp, viewport_w, viewport_h;
        int r;
        bool bypass_lut = false;
        struct drm_format_name_buf format_name;

        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
                DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }

        if (atomic)
                target_fb = fb;
        else
                target_fb = crtc->primary->fb;

        /* If atomic, assume fb object is pinned & idle & fenced and
         * just update base pointers
         */
        obj = target_fb->obj[0];
        abo = gem_to_amdgpu_bo(obj);
        r = amdgpu_bo_reserve(abo, false);
        if (unlikely(r != 0))
                return r;

        if (!atomic) {
                r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
                if (unlikely(r != 0)) {
                        amdgpu_bo_unreserve(abo);
                        return -EINVAL;
                }
        }
        fb_location = amdgpu_bo_gpu_offset(abo);

        amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
        amdgpu_bo_unreserve(abo);

        pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

        switch (target_fb->format->format) {
        case DRM_FORMAT_C8:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
                break;
        case DRM_FORMAT_XRGB4444:
        case DRM_FORMAT_ARGB4444:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN16);
#endif
                break;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_ARGB1555:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN16);
#endif
                break;
        case DRM_FORMAT_BGRX5551:
        case DRM_FORMAT_BGRA5551:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN16);
#endif
                break;
        case DRM_FORMAT_RGB565:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN16);
#endif
                break;
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN32);
#endif
                break;
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN32);
#endif
                /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
                bypass_lut = true;
                break;
        case DRM_FORMAT_BGRX1010102:
        case DRM_FORMAT_BGRA1010102:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN32);
#endif
                /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
                bypass_lut = true;
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
                fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
#ifdef __BIG_ENDIAN
                fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
                                        ENDIAN_8IN32);
#endif
                break;
        default:
                DRM_ERROR("Unsupported screen format %s\n",
                          drm_get_format_name(target_fb->format->format, &format_name));
                return -EINVAL;
        }

        if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
                unsigned bankw, bankh, mtaspect, tile_split, num_banks;

                bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
                bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
                mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
                tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
                num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
                                          ARRAY_2D_TILED_THIN1);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
                                          tile_split);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
                                          mtaspect);
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
                                          ADDR_SURF_MICRO_TILING_DISPLAY);
        } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
                fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
                                          ARRAY_1D_TILED_THIN1);
        }

        fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
                                  pipe_config);

        dce_v10_0_vga_enable(crtc, false);

        /* Make sure surface address is updated at vertical blank rather than
         * horizontal blank
         */
        tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
                            GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(fb_location));
        WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(fb_location));
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
        WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
        WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
        WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

        /*
         * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
         * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
         * retain the full precision throughout the pipeline.
         */
        tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
        if (bypass_lut)
                tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
        else
                tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
        WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);

        if (bypass_lut)
                DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

        WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
        WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

        fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
        WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

        dce_v10_0_grph_enable(crtc, true);

        WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
               target_fb->height);

        x &= ~3;
        y &= ~1;
        WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
               (x << 16) | y);
        viewport_w = crtc->mode.hdisplay;
        viewport_h = (crtc->mode.vdisplay + 1) & ~1;
        WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
               (viewport_w << 16) | viewport_h);

        /* set pageflip to happen anywhere in vblank interval */
        WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

        if (!atomic && fb && fb != crtc->primary->fb) {
                abo = gem_to_amdgpu_bo(fb->obj[0]);
                r = amdgpu_bo_reserve(abo, true);
                if (unlikely(r != 0))
                        return r;
                amdgpu_bo_unpin(abo);
                amdgpu_bo_unreserve(abo);
        }

        /* Bytes per pixel may have changed */
        dce_v10_0_bandwidth_update(adev);

        return 0;
}

static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
                                     struct drm_display_mode *mode)
{
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        u32 tmp;

        tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
        else
                tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
        WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
}

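/*
 * load the gamma LUT for the crtc and set the color pipeline (input/output
 * CSC, prescale, degamma, regamma, gamut remap) to bypass
 */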
static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        u16 *r, *g, *b;
        int i;
        u32 tmp;

        DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

        tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
        tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0);
        WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
        WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1);
        WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
        tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0);
        WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

        WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

        WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
        WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
        WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

        WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
        WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

        WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
        r = crtc->gamma_store;
        g = r + crtc->gamma_size;
        b = g + crtc->gamma_size;
        for (i = 0; i < 256; i++) {
                WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
                       ((*r++ & 0xffc0) << 14) |
                       ((*g++ & 0xffc0) << 4) |
                       (*b++ >> 6));
        }

        tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
        tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0);
        tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
        WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
        tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0);
        WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
        tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0);
        WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
        tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0);
        WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

        /* XXX match this to the depth of the crtc fmt block, move to modeset? */
        WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
        /* XXX this only needs to be programmed once per crtc at startup,
         * not sure where the best place for it is
         */
        tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
        WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

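/*
 * map the UNIPHY encoder id and link (A/B) to a DIG block index
 */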
static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
{
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

        switch (amdgpu_encoder->encoder_id) {
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
                if (dig->linkb)
                        return 1;
                else
                        return 0;
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
                if (dig->linkb)
                        return 3;
                else
                        return 2;
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
                if (dig->linkb)
                        return 5;
                else
                        return 4;
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                return 6;
        default:
                DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
                return 0;
        }
}

/**
 * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc.
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
 * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
 * monitors a dedicated PPLL must be used.  If a particular board has
 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
 * as there is no need to program the PLL itself.  If we are not able to
 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
 * avoid messing up an existing monitor.
 *
 * Asic specific PLL information
 *
 * DCE 10.x
 * Tonga
 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
 * CI
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
 *
 */
static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        u32 pll_in_use;
        int pll;

        if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
                if (adev->clock.dp_extclk)
                        /* skip PPLL programming if using ext clock */
                        return ATOM_PPLL_INVALID;
                else {
                        /* use the same PPLL for all DP monitors */
                        pll = amdgpu_pll_get_shared_dp_ppll(crtc);
                        if (pll != ATOM_PPLL_INVALID)
                                return pll;
                }
        } else {
                /* use the same PPLL for all monitors with the same clock */
                pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
                if (pll != ATOM_PPLL_INVALID)
                        return pll;
        }

        /* DCE10 has PPLL0, PPLL1, and PPLL2 */
        pll_in_use = amdgpu_pll_get_use_mask(crtc);
        if (!(pll_in_use & (1 << ATOM_PPLL2)))
                return ATOM_PPLL2;
        if (!(pll_in_use & (1 << ATOM_PPLL1)))
                return ATOM_PPLL1;
        if (!(pll_in_use & (1 << ATOM_PPLL0)))
                return ATOM_PPLL0;
        DRM_ERROR("unable to allocate a PPLL\n");
        return ATOM_PPLL_INVALID;
}

static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
        struct amdgpu_device *adev = crtc->dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        uint32_t cur_lock;

        cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
        if (lock)
                cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
        else
                cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
        WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;

        tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
        u32 tmp;

        WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(amdgpu_crtc->cursor_addr));
        WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(amdgpu_crtc->cursor_addr));

        tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
        tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
        WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
                                        int x, int y)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int xorigin = 0, yorigin = 0;

        amdgpu_crtc->cursor_x = x;
        amdgpu_crtc->cursor_y = y;

        /* avivo cursors are offset into the total surface */
        x += crtc->x;
        y += crtc->y;
        DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

        if (x < 0) {
                xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
                x = 0;
        }
        if (y < 0) {
                yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
                y = 0;
        }

        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
        WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
               ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

        return 0;
}

static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc,
                                      int x, int y)
{
        int ret;

        dce_v10_0_lock_cursor(crtc, true);
        ret = dce_v10_0_cursor_move_locked(crtc, x, y);
        dce_v10_0_lock_cursor(crtc, false);

        return ret;
}

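/*
 * pin the new cursor BO and program its address, size and hotspot,
 * then unpin and release the previous cursor BO
 */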
static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      struct drm_file *file_priv,
                                      uint32_t handle,
                                      uint32_t width,
                                      uint32_t height,
                                      int32_t hot_x,
                                      int32_t hot_y)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;

        if (!handle) {
                /* turn off cursor */
                dce_v10_0_hide_cursor(crtc);
                obj = NULL;
                goto unpin;
        }

        if ((width > amdgpu_crtc->max_cursor_width) ||
            (height > amdgpu_crtc->max_cursor_height)) {
                DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
                return -ENOENT;
        }

        aobj = gem_to_amdgpu_bo(obj);
        ret = amdgpu_bo_reserve(aobj, false);
        if (ret != 0) {
                drm_gem_object_put_unlocked(obj);
                return ret;
        }

        ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
        amdgpu_bo_unreserve(aobj);
        if (ret) {
                DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
                drm_gem_object_put_unlocked(obj);
                return ret;
        }
        amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);

        dce_v10_0_lock_cursor(crtc, true);

        if (width != amdgpu_crtc->cursor_width ||
            height != amdgpu_crtc->cursor_height ||
            hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;

                x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
                y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

                dce_v10_0_cursor_move_locked(crtc, x, y);

                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
                amdgpu_crtc->cursor_hot_x = hot_x;
                amdgpu_crtc->cursor_hot_y = hot_y;
        }

        dce_v10_0_show_cursor(crtc);
        dce_v10_0_lock_cursor(crtc, false);

unpin:
        if (amdgpu_crtc->cursor_bo) {
                struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
                ret = amdgpu_bo_reserve(aobj, true);
                if (likely(ret == 0)) {
                        amdgpu_bo_unpin(aobj);
                        amdgpu_bo_unreserve(aobj);
                }
                drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
        }

        amdgpu_crtc->cursor_bo = obj;
        return 0;
}

static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        if (amdgpu_crtc->cursor_bo) {
                dce_v10_0_lock_cursor(crtc, true);

                dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);

                dce_v10_0_show_cursor(crtc);

                dce_v10_0_lock_cursor(crtc, false);
        }
}

static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                    u16 *blue, uint32_t size,
                                    struct drm_modeset_acquire_ctx *ctx)
{
        dce_v10_0_crtc_load_lut(crtc);

        return 0;
}

static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(amdgpu_crtc);
}

static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
        .cursor_set2 = dce_v10_0_crtc_cursor_set2,
        .cursor_move = dce_v10_0_crtc_cursor_move,
        .gamma_set = dce_v10_0_crtc_gamma_set,
        .set_config = amdgpu_display_crtc_set_config,
        .destroy = dce_v10_0_crtc_destroy,
        .page_flip_target = amdgpu_display_crtc_page_flip_target,
};

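/*
 * power the crtc up or down; on DPMS_ON also re-enable the vblank and
 * pageflip interrupts and reload the LUT
 */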
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        unsigned type;

        switch (mode) {
        case DRM_MODE_DPMS_ON:
                amdgpu_crtc->enabled = true;
                amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
                dce_v10_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v10_0_vga_enable(crtc, false);
                /* Make sure VBLANK and PFLIP interrupts are still enabled */
                type = amdgpu_display_crtc_idx_to_irq_type(adev,
                                                amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
                drm_crtc_vblank_on(crtc);
                dce_v10_0_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
                drm_crtc_vblank_off(crtc);
                if (amdgpu_crtc->enabled) {
                        dce_v10_0_vga_enable(crtc, true);
                        amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
                        dce_v10_0_vga_enable(crtc, false);
                }
                amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
                amdgpu_crtc->enabled = false;
                break;
        }
        /* adjust pm to dpms */
        amdgpu_pm_compute_clocks(adev);
}

static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
{
        /* disable crtc pair power gating before programming */
        amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
        amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
        dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void dce_v10_0_crtc_commit(struct drm_crtc *crtc)
{
        dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
        amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}

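/*
 * disable the crtc and unpin its fb; the PLL is only torn down if no
 * other enabled crtc is still using it
 */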
2527 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
2528 {
2529         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2530         struct drm_device *dev = crtc->dev;
2531         struct amdgpu_device *adev = dev->dev_private;
2532         struct amdgpu_atom_ss ss;
2533         int i;
2534
2535         dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2536         if (crtc->primary->fb) {
2537                 int r;
2538                 struct amdgpu_bo *abo;
2539
2540                 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2541                 r = amdgpu_bo_reserve(abo, true);
2542         if (unlikely(r)) {
2543                 DRM_ERROR("failed to reserve abo before unpin\n");
2544         } else {
2545                         amdgpu_bo_unpin(abo);
2546                         amdgpu_bo_unreserve(abo);
2547                 }
2548         }
2549         /* disable the GRPH */
2550         dce_v10_0_grph_enable(crtc, false);
2551
2552         amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2553
2554         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2555                 if (adev->mode_info.crtcs[i] &&
2556                     adev->mode_info.crtcs[i]->enabled &&
2557                     i != amdgpu_crtc->crtc_id &&
2558                     amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2559                         /* another crtc is still using this pll, don't
2560                          * turn off the pll
2561                          */
2562                         goto done;
2563                 }
2564         }
2565
2566         switch (amdgpu_crtc->pll_id) {
2567         case ATOM_PPLL0:
2568         case ATOM_PPLL1:
2569         case ATOM_PPLL2:
2570                 /* disable the ppll */
2571                 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2572                                           0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2573                 break;
2574         default:
2575                 break;
2576         }
2577 done:
2578         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2579         amdgpu_crtc->adjusted_clock = 0;
2580         amdgpu_crtc->encoder = NULL;
2581         amdgpu_crtc->connector = NULL;
2582 }
2583
2584 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc,
2585                                   struct drm_display_mode *mode,
2586                                   struct drm_display_mode *adjusted_mode,
2587                                   int x, int y, struct drm_framebuffer *old_fb)
2588 {
2589         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2590
2591         if (!amdgpu_crtc->adjusted_clock)
2592                 return -EINVAL;
2593
2594         amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2595         amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2596         dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2597         amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2598         amdgpu_atombios_crtc_scaler_setup(crtc);
2599         dce_v10_0_cursor_reset(crtc);
2600         /* update the hw mode for dpm */
2601         amdgpu_crtc->hw_mode = *adjusted_mode;
2602
2603         return 0;
2604 }
2605
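/**
 * dce_v10_0_crtc_mode_fixup - validate and adjust a mode for a crtc
 *
 * @crtc: crtc being configured
 * @mode: requested mode
 * @adjusted_mode: mode actually programmed, may be adjusted
 *
 * Caches the encoder/connector driving the crtc, applies scaler
 * fixups, prepares the PLL parameters and picks a PPLL.  Returns false
 * (and clears the cached encoder/connector) if no encoder is attached,
 * the scaling mode cannot be satisfied or no PPLL is available for a
 * non-DP encoder.
 */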
2606 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
2607                                      const struct drm_display_mode *mode,
2608                                      struct drm_display_mode *adjusted_mode)
2609 {
2610         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2611         struct drm_device *dev = crtc->dev;
2612         struct drm_encoder *encoder;
2613
2614         /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2615         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2616                 if (encoder->crtc == crtc) {
2617                         amdgpu_crtc->encoder = encoder;
2618                         amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2619                         break;
2620                 }
2621         }
2622         if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2623                 amdgpu_crtc->encoder = NULL;
2624                 amdgpu_crtc->connector = NULL;
2625                 return false;
2626         }
2627         if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2628                 return false;
2629         if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2630                 return false;
2631         /* pick pll */
2632         amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc);
2633         /* if we can't get a PPLL for a non-DP encoder, fail */
2634         if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2635             !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2636                 return false;
2637
2638         return true;
2639 }
2640
2641 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2642                                   struct drm_framebuffer *old_fb)
2643 {
2644         return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2645 }
2646
2647 static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2648                                          struct drm_framebuffer *fb,
2649                                          int x, int y, enum mode_set_atomic state)
2650 {
2651         return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
2652 }
2653
2654 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
2655         .dpms = dce_v10_0_crtc_dpms,
2656         .mode_fixup = dce_v10_0_crtc_mode_fixup,
2657         .mode_set = dce_v10_0_crtc_mode_set,
2658         .mode_set_base = dce_v10_0_crtc_set_base,
2659         .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
2660         .prepare = dce_v10_0_crtc_prepare,
2661         .commit = dce_v10_0_crtc_commit,
2662         .disable = dce_v10_0_crtc_disable,
2663 };
2664
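/**
 * dce_v10_0_crtc_init - allocate and initialize a crtc
 *
 * @adev: amdgpu_device pointer
 * @index: crtc index
 *
 * Allocates an amdgpu_crtc (with trailing connector array), registers
 * it with the DRM core, sets the gamma table size and maximum cursor
 * dimensions, hooks up the register offset for the given crtc and adds
 * the crtc helper funcs.  Returns 0 on success, -ENOMEM if the
 * allocation fails.
 */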
2665 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
2666 {
2667         struct amdgpu_crtc *amdgpu_crtc;
2668
2669         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2670                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2671         if (amdgpu_crtc == NULL)
2672                 return -ENOMEM;
2673
2674         drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
2675
2676         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2677         amdgpu_crtc->crtc_id = index;
2678         adev->mode_info.crtcs[index] = amdgpu_crtc;
2679
2680         amdgpu_crtc->max_cursor_width = 128;
2681         amdgpu_crtc->max_cursor_height = 128;
2682         adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2683         adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2684
2685         switch (amdgpu_crtc->crtc_id) {
2686         case 0:
2687         default:
2688                 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2689                 break;
2690         case 1:
2691                 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2692                 break;
2693         case 2:
2694                 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2695                 break;
2696         case 3:
2697                 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2698                 break;
2699         case 4:
2700                 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2701                 break;
2702         case 5:
2703                 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2704                 break;
2705         }
2706
2707         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2708         amdgpu_crtc->adjusted_clock = 0;
2709         amdgpu_crtc->encoder = NULL;
2710         amdgpu_crtc->connector = NULL;
2711         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
2712
2713         return 0;
2714 }
2715
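/**
 * dce_v10_0_early_init - early ip init
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Hooks up the audio endpoint register accessors and the display
 * funcs, queries the crtc count and sets the hpd/dig counts for the
 * asic.  Returns -EINVAL for asics this DCE version does not support.
 */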
2716 static int dce_v10_0_early_init(void *handle)
2717 {
2718         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2719
2720         adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
2721         adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
2722
2723         dce_v10_0_set_display_funcs(adev);
2724
2725         adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
2726
2727         switch (adev->asic_type) {
2728         case CHIP_FIJI:
2729         case CHIP_TONGA:
2730                 adev->mode_info.num_hpd = 6;
2731                 adev->mode_info.num_dig = 7;
2732                 break;
2733         default:
2734                 /* FIXME: not supported yet */
2735                 return -EINVAL;
2736         }
2737
2738         dce_v10_0_set_irq_funcs(adev);
2739
2740         return 0;
2741 }
2742
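/**
 * dce_v10_0_sw_init - software state setup
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Registers the crtc, pageflip and hotplug interrupt sources, fills in
 * the mode_config limits and properties, allocates the crtcs, reads
 * the connector setup from the vbios object tables and brings up the
 * AFMT/audio blocks and connector output polling.
 */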
2743 static int dce_v10_0_sw_init(void *handle)
2744 {
2745         int r, i;
2746         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2747
2748         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2749                 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2750                 if (r)
2751                         return r;
2752         }
2753
2754         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2755                 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2756                 if (r)
2757                         return r;
2758         }
2759
2760         /* HPD hotplug */
2761         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2762         if (r)
2763                 return r;
2764
2765         adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2766
2767         adev->ddev->mode_config.async_page_flip = true;
2768
2769         adev->ddev->mode_config.max_width = 16384;
2770         adev->ddev->mode_config.max_height = 16384;
2771
2772         adev->ddev->mode_config.preferred_depth = 24;
2773         adev->ddev->mode_config.prefer_shadow = 1;
2774
2775         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2776
2777         r = amdgpu_display_modeset_create_props(adev);
2778         if (r)
2779                 return r;
2780
2784         /* allocate crtcs */
2785         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2786                 r = dce_v10_0_crtc_init(adev, i);
2787                 if (r)
2788                         return r;
2789         }
2790
2791         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2792                 amdgpu_display_print_display_setup(adev->ddev);
2793         else
2794                 return -EINVAL;
2795
2796         /* setup afmt */
2797         r = dce_v10_0_afmt_init(adev);
2798         if (r)
2799                 return r;
2800
2801         r = dce_v10_0_audio_init(adev);
2802         if (r)
2803                 return r;
2804
2805         drm_kms_helper_poll_init(adev->ddev);
2806
2807         adev->mode_info.mode_config_initialized = true;
2808         return 0;
2809 }
2810
2811 static int dce_v10_0_sw_fini(void *handle)
2812 {
2813         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2814
2815         kfree(adev->mode_info.bios_hardcoded_edid);
2816
2817         drm_kms_helper_poll_fini(adev->ddev);
2818
2819         dce_v10_0_audio_fini(adev);
2820
2821         dce_v10_0_afmt_fini(adev);
2822
2823         drm_mode_config_cleanup(adev->ddev);
2824         adev->mode_info.mode_config_initialized = false;
2825
2826         return 0;
2827 }
2828
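/**
 * dce_v10_0_hw_init - hardware init
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Programs the golden register settings, disables VGA rendering,
 * initializes the DIG PHYs and the display engine PLL, sets up hpd and
 * pageflip interrupts and makes sure all audio pins start out
 * disabled.
 */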
2829 static int dce_v10_0_hw_init(void *handle)
2830 {
2831         int i;
2832         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2833
2834         dce_v10_0_init_golden_registers(adev);
2835
2836         /* disable vga render */
2837         dce_v10_0_set_vga_render_state(adev, false);
2838         /* init dig PHYs, disp eng pll */
2839         amdgpu_atombios_encoder_init_dig(adev);
2840         amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2841
2842         /* initialize hpd */
2843         dce_v10_0_hpd_init(adev);
2844
2845         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2846                 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2847         }
2848
2849         dce_v10_0_pageflip_interrupt_init(adev);
2850
2851         return 0;
2852 }
2853
2854 static int dce_v10_0_hw_fini(void *handle)
2855 {
2856         int i;
2857         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2858
2859         dce_v10_0_hpd_fini(adev);
2860
2861         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2862                 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2863         }
2864
2865         dce_v10_0_pageflip_interrupt_fini(adev);
2866
2867         return 0;
2868 }
2869
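/**
 * dce_v10_0_suspend - DCE suspend
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * Saves the current backlight level from the register so that
 * dce_v10_0_resume() can restore it, then tears the hardware down via
 * dce_v10_0_hw_fini().
 */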
2870 static int dce_v10_0_suspend(void *handle)
2871 {
2872         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2873
2874         adev->mode_info.bl_level =
2875                 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2876
2877         return dce_v10_0_hw_fini(handle);
2878 }
2879
2880 static int dce_v10_0_resume(void *handle)
2881 {
2882         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2883         int ret;
2884
2885         amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2886                                                            adev->mode_info.bl_level);
2887
2888         ret = dce_v10_0_hw_init(handle);
2889
2890         /* turn on the BL */
2891         if (adev->mode_info.bl_encoder) {
2892                 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2893                                                                   adev->mode_info.bl_encoder);
2894                 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2895                                                     bl_level);
2896         }
2897
2898         return ret;
2899 }
2900
2901 static bool dce_v10_0_is_idle(void *handle)
2902 {
2903         return true;
2904 }
2905
2906 static int dce_v10_0_wait_for_idle(void *handle)
2907 {
2908         return 0;
2909 }
2910
2911 static bool dce_v10_0_check_soft_reset(void *handle)
2912 {
2913         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2914
2915         return dce_v10_0_is_display_hung(adev);
2916 }
2917
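/**
 * dce_v10_0_soft_reset - soft reset the DCE block
 *
 * @handle: amdgpu_device pointer (as void *)
 *
 * If the display engine appears hung, pulses the DC bit in
 * SRBM_SOFT_RESET (set, delay, clear) and waits briefly for the block
 * to settle.  Always returns 0.
 */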
2918 static int dce_v10_0_soft_reset(void *handle)
2919 {
2920         u32 srbm_soft_reset = 0, tmp;
2921         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2922
2923         if (dce_v10_0_is_display_hung(adev))
2924                 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2925
2926         if (srbm_soft_reset) {
2927                 tmp = RREG32(mmSRBM_SOFT_RESET);
2928                 tmp |= srbm_soft_reset;
2929                 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2930                 WREG32(mmSRBM_SOFT_RESET, tmp);
2931                 tmp = RREG32(mmSRBM_SOFT_RESET);
2932
2933                 udelay(50);
2934
2935                 tmp &= ~srbm_soft_reset;
2936                 WREG32(mmSRBM_SOFT_RESET, tmp);
2937                 tmp = RREG32(mmSRBM_SOFT_RESET);
2938
2939                 /* Wait a little for things to settle down */
2940                 udelay(50);
2941         }
2942         return 0;
2943 }
2944
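/**
 * dce_v10_0_set_crtc_vblank_interrupt_state - enable/disable vblank irq
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc whose vblank interrupt is changed
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Sets or clears the VBLANK_INTERRUPT_MASK bit in the line-buffer
 * interrupt mask register of the given crtc.
 */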
2945 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2946                                                      int crtc,
2947                                                      enum amdgpu_interrupt_state state)
2948 {
2949         u32 lb_interrupt_mask;
2950
2951         if (crtc >= adev->mode_info.num_crtc) {
2952                 DRM_DEBUG("invalid crtc %d\n", crtc);
2953                 return;
2954         }
2955
2956         switch (state) {
2957         case AMDGPU_IRQ_STATE_DISABLE:
2958                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2959                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2960                                                   VBLANK_INTERRUPT_MASK, 0);
2961                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2962                 break;
2963         case AMDGPU_IRQ_STATE_ENABLE:
2964                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2965                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2966                                                   VBLANK_INTERRUPT_MASK, 1);
2967                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2968                 break;
2969         default:
2970                 break;
2971         }
2972 }
2973
2974 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2975                                                     int crtc,
2976                                                     enum amdgpu_interrupt_state state)
2977 {
2978         u32 lb_interrupt_mask;
2979
2980         if (crtc >= adev->mode_info.num_crtc) {
2981                 DRM_DEBUG("invalid crtc %d\n", crtc);
2982                 return;
2983         }
2984
2985         switch (state) {
2986         case AMDGPU_IRQ_STATE_DISABLE:
2987                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2988                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2989                                                   VLINE_INTERRUPT_MASK, 0);
2990                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2991                 break;
2992         case AMDGPU_IRQ_STATE_ENABLE:
2993                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
2994                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
2995                                                   VLINE_INTERRUPT_MASK, 1);
2996                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
2997                 break;
2998         default:
2999                 break;
3000         }
3001 }
3002
3003 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
3004                                        struct amdgpu_irq_src *source,
3005                                        unsigned hpd,
3006                                        enum amdgpu_interrupt_state state)
3007 {
3008         u32 tmp;
3009
3010         if (hpd >= adev->mode_info.num_hpd) {
3011                 DRM_DEBUG("invalid hpd %d\n", hpd);
3012                 return 0;
3013         }
3014
3015         switch (state) {
3016         case AMDGPU_IRQ_STATE_DISABLE:
3017                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3018                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3019                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3020                 break;
3021         case AMDGPU_IRQ_STATE_ENABLE:
3022                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3023                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3024                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3025                 break;
3026         default:
3027                 break;
3028         }
3029
3030         return 0;
3031 }
3032
3033 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev,
3034                                         struct amdgpu_irq_src *source,
3035                                         unsigned type,
3036                                         enum amdgpu_interrupt_state state)
3037 {
3038         switch (type) {
3039         case AMDGPU_CRTC_IRQ_VBLANK1:
3040                 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3041                 break;
3042         case AMDGPU_CRTC_IRQ_VBLANK2:
3043                 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3044                 break;
3045         case AMDGPU_CRTC_IRQ_VBLANK3:
3046                 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3047                 break;
3048         case AMDGPU_CRTC_IRQ_VBLANK4:
3049                 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3050                 break;
3051         case AMDGPU_CRTC_IRQ_VBLANK5:
3052                 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3053                 break;
3054         case AMDGPU_CRTC_IRQ_VBLANK6:
3055                 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3056                 break;
3057         case AMDGPU_CRTC_IRQ_VLINE1:
3058                 dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state);
3059                 break;
3060         case AMDGPU_CRTC_IRQ_VLINE2:
3061                 dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state);
3062                 break;
3063         case AMDGPU_CRTC_IRQ_VLINE3:
3064                 dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state);
3065                 break;
3066         case AMDGPU_CRTC_IRQ_VLINE4:
3067                 dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state);
3068                 break;
3069         case AMDGPU_CRTC_IRQ_VLINE5:
3070                 dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state);
3071                 break;
3072         case AMDGPU_CRTC_IRQ_VLINE6:
3073                 dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state);
3074                 break;
3075         default:
3076                 break;
3077         }
3078         return 0;
3079 }
3080
3081 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3082                                             struct amdgpu_irq_src *src,
3083                                             unsigned type,
3084                                             enum amdgpu_interrupt_state state)
3085 {
3086         u32 reg;
3087
3088         if (type >= adev->mode_info.num_crtc) {
3089                 DRM_ERROR("invalid pageflip crtc %d\n", type);
3090                 return -EINVAL;
3091         }
3092
3093         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3094         if (state == AMDGPU_IRQ_STATE_DISABLE)
3095                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3096                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3097         else
3098                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3099                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3100
3101         return 0;
3102 }
3103
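/**
 * dce_v10_0_pageflip_irq - pageflip interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this handler is registered for
 * @entry: decoded interrupt vector entry
 *
 * Derives the crtc from the interrupt source id, acks the pending
 * pageflip interrupt and, if a flip was actually submitted, completes
 * it: sends the vblank event to userspace, drops the vblank reference
 * and schedules the buffer unpin work.
 */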
3104 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3105                                   struct amdgpu_irq_src *source,
3106                                   struct amdgpu_iv_entry *entry)
3107 {
3108         unsigned long flags;
3109         unsigned crtc_id;
3110         struct amdgpu_crtc *amdgpu_crtc;
3111         struct amdgpu_flip_work *works;
3112
3113         crtc_id = (entry->src_id - 8) >> 1;
3114         if (crtc_id >= adev->mode_info.num_crtc) {
3115                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3116                 return -EINVAL;
3117         }
3118         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3119
3120
3121         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3122             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3123                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3124                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3125
3126         /* the IRQ can fire while the crtc is still being set up */
3127         if (amdgpu_crtc == NULL)
3128                 return 0;
3129
3130         spin_lock_irqsave(&adev->ddev->event_lock, flags);
3131         works = amdgpu_crtc->pflip_works;
3132         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3133                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3134                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
3135                                  amdgpu_crtc->pflip_status,
3136                                  AMDGPU_FLIP_SUBMITTED);
3137                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3138                 return 0;
3139         }
3140
3141         /* page flip completed. clean up */
3142         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3143         amdgpu_crtc->pflip_works = NULL;
3144
3145         /* wake up userspace */
3146         if (works->event)
3147                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3148
3149         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3150
3151         drm_crtc_vblank_put(&amdgpu_crtc->base);
3152         schedule_work(&works->unpin_work);
3153
3154         return 0;
3155 }
3156
3157 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
3158                                   int hpd)
3159 {
3160         u32 tmp;
3161
3162         if (hpd >= adev->mode_info.num_hpd) {
3163                 DRM_DEBUG("invalid hpd %d\n", hpd);
3164                 return;
3165         }
3166
3167         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3168         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3169         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3170 }
3171
3172 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3173                                           int crtc)
3174 {
3175         u32 tmp;
3176
3177         if (crtc >= adev->mode_info.num_crtc) {
3178                 DRM_DEBUG("invalid crtc %d\n", crtc);
3179                 return;
3180         }
3181
3182         tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3183         tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3184         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3185 }
3186
3187 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3188                                          int crtc)
3189 {
3190         u32 tmp;
3191
3192         if (crtc >= adev->mode_info.num_crtc) {
3193                 DRM_DEBUG("invalid crtc %d\n", crtc);
3194                 return;
3195         }
3196
3197         tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3198         tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3199         WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3200 }
3201
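/**
 * dce_v10_0_crtc_irq - crtc vblank/vline interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this handler is registered for
 * @entry: decoded interrupt vector entry
 *
 * Acks the vblank or vline interrupt indicated by the source data and,
 * for vblank, forwards the event to the DRM vblank machinery if the
 * interrupt is enabled.
 */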
3202 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3203                               struct amdgpu_irq_src *source,
3204                               struct amdgpu_iv_entry *entry)
3205 {
3206         unsigned crtc = entry->src_id - 1;
3207         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3208         unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
3209
3210         switch (entry->src_data[0]) {
3211         case 0: /* vblank */
3212                 if (disp_int & interrupt_status_offsets[crtc].vblank)
3213                         dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3214                 else
3215                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3216
3217                 if (amdgpu_irq_enabled(adev, source, irq_type))
3218                         drm_handle_vblank(adev->ddev, crtc);
3220                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3221
3222                 break;
3223         case 1: /* vline */
3224                 if (disp_int & interrupt_status_offsets[crtc].vline)
3225                         dce_v10_0_crtc_vline_int_ack(adev, crtc);
3226                 else
3227                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3228
3229                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3230
3231                 break;
3232         default:
3233                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3234                 break;
3235         }
3236
3237         return 0;
3238 }
3239
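/**
 * dce_v10_0_hpd_irq - hotplug interrupt handler
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source this handler is registered for
 * @entry: decoded interrupt vector entry
 *
 * Acks the hpd interrupt for the pin that fired and schedules the
 * hotplug work to re-probe the connectors.
 */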
3240 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
3241                              struct amdgpu_irq_src *source,
3242                              struct amdgpu_iv_entry *entry)
3243 {
3244         uint32_t disp_int, mask;
3245         unsigned hpd;
3246
3247         if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3248                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3249                 return 0;
3250         }
3251
3252         hpd = entry->src_data[0];
3253         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3254         mask = interrupt_status_offsets[hpd].hpd;
3255
3256         if (disp_int & mask) {
3257                 dce_v10_0_hpd_int_ack(adev, hpd);
3258                 schedule_work(&adev->hotplug_work);
3259                 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3260         }
3261
3262         return 0;
3263 }
3264
3265 static int dce_v10_0_set_clockgating_state(void *handle,
3266                                           enum amd_clockgating_state state)
3267 {
3268         return 0;
3269 }
3270
3271 static int dce_v10_0_set_powergating_state(void *handle,
3272                                           enum amd_powergating_state state)
3273 {
3274         return 0;
3275 }
3276
3277 static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
3278         .name = "dce_v10_0",
3279         .early_init = dce_v10_0_early_init,
3280         .late_init = NULL,
3281         .sw_init = dce_v10_0_sw_init,
3282         .sw_fini = dce_v10_0_sw_fini,
3283         .hw_init = dce_v10_0_hw_init,
3284         .hw_fini = dce_v10_0_hw_fini,
3285         .suspend = dce_v10_0_suspend,
3286         .resume = dce_v10_0_resume,
3287         .is_idle = dce_v10_0_is_idle,
3288         .wait_for_idle = dce_v10_0_wait_for_idle,
3289         .check_soft_reset = dce_v10_0_check_soft_reset,
3290         .soft_reset = dce_v10_0_soft_reset,
3291         .set_clockgating_state = dce_v10_0_set_clockgating_state,
3292         .set_powergating_state = dce_v10_0_set_powergating_state,
3293 };
3294
3295 static void
3296 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
3297                           struct drm_display_mode *mode,
3298                           struct drm_display_mode *adjusted_mode)
3299 {
3300         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3301
3302         amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3303
3304         /* need to call this here rather than in prepare() since we need some crtc info */
3305         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3306
3307         /* the scaler setup clears this on some chips */
3308         dce_v10_0_set_interleave(encoder->crtc, mode);
3309
3310         if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3311                 dce_v10_0_afmt_enable(encoder, true);
3312                 dce_v10_0_afmt_setmode(encoder, adjusted_mode);
3313         }
3314 }
3315
3316 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
3317 {
3318         struct amdgpu_device *adev = encoder->dev->dev_private;
3319         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3320         struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3321
3322         if ((amdgpu_encoder->active_device &
3323              (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3324             (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3325              ENCODER_OBJECT_ID_NONE)) {
3326                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3327                 if (dig) {
3328                         dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
3329                         if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3330                                 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3331                 }
3332         }
3333
3334         amdgpu_atombios_scratch_regs_lock(adev, true);
3335
3336         if (connector) {
3337                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3338
3339                 /* select the clock/data port if it uses a router */
3340                 if (amdgpu_connector->router.cd_valid)
3341                         amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3342
3343                 /* turn eDP panel on for mode set */
3344                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3345                         amdgpu_atombios_encoder_set_edp_panel_power(connector,
3346                                                              ATOM_TRANSMITTER_ACTION_POWER_ON);
3347         }
3348
3349         /* this is needed for the pll/ss setup to work correctly in some cases */
3350         amdgpu_atombios_encoder_set_crtc_source(encoder);
3351         /* set up the FMT blocks */
3352         dce_v10_0_program_fmt(encoder);
3353 }
3354
3355 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
3356 {
3357         struct drm_device *dev = encoder->dev;
3358         struct amdgpu_device *adev = dev->dev_private;
3359
3360         /* need to call this here as we need the crtc set up */
3361         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3362         amdgpu_atombios_scratch_regs_lock(adev, false);
3363 }
3364
3365 static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
3366 {
3367         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3368         struct amdgpu_encoder_atom_dig *dig;
3369
3370         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3371
3372         if (amdgpu_atombios_encoder_is_digital(encoder)) {
3373                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3374                         dce_v10_0_afmt_enable(encoder, false);
3375                 dig = amdgpu_encoder->enc_priv;
3376                 dig->dig_encoder = -1;
3377         }
3378         amdgpu_encoder->active_device = 0;
3379 }
3380
3381 /* these are handled by the primary encoders */
3382 static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
3383 {
3384
3385 }
3386
3387 static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
3388 {
3389
3390 }
3391
3392 static void
3393 dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
3394                       struct drm_display_mode *mode,
3395                       struct drm_display_mode *adjusted_mode)
3396 {
3397
3398 }
3399
3400 static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
3401 {
3402
3403 }
3404
3405 static void
3406 dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
3407 {
3408
3409 }
3410
3411 static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
3412         .dpms = dce_v10_0_ext_dpms,
3413         .prepare = dce_v10_0_ext_prepare,
3414         .mode_set = dce_v10_0_ext_mode_set,
3415         .commit = dce_v10_0_ext_commit,
3416         .disable = dce_v10_0_ext_disable,
3417         /* no detect for TMDS/LVDS yet */
3418 };
3419
3420 static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
3421         .dpms = amdgpu_atombios_encoder_dpms,
3422         .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3423         .prepare = dce_v10_0_encoder_prepare,
3424         .mode_set = dce_v10_0_encoder_mode_set,
3425         .commit = dce_v10_0_encoder_commit,
3426         .disable = dce_v10_0_encoder_disable,
3427         .detect = amdgpu_atombios_encoder_dig_detect,
3428 };
3429
3430 static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
3431         .dpms = amdgpu_atombios_encoder_dpms,
3432         .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3433         .prepare = dce_v10_0_encoder_prepare,
3434         .mode_set = dce_v10_0_encoder_mode_set,
3435         .commit = dce_v10_0_encoder_commit,
3436         .detect = amdgpu_atombios_encoder_dac_detect,
3437 };
3438
3439 static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
3440 {
3441         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3442         if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3443                 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3444         kfree(amdgpu_encoder->enc_priv);
3445         drm_encoder_cleanup(encoder);
3446         kfree(amdgpu_encoder);
3447 }
3448
3449 static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
3450         .destroy = dce_v10_0_encoder_destroy,
3451 };
3452
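/**
 * dce_v10_0_encoder_add - register an encoder with the DRM core
 *
 * @adev: amdgpu_device pointer
 * @encoder_enum: encoder object enum from the vbios
 * @supported_device: bitmask of devices this encoder can drive
 * @caps: encoder capabilities
 *
 * If an encoder with the same enum already exists, just ORs in the new
 * supported devices.  Otherwise allocates a new amdgpu_encoder,
 * derives the possible-crtc mask from the number of crtcs and picks
 * the DRM encoder type and helper funcs based on the vbios encoder
 * object id.
 */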
3453 static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
3454                                  uint32_t encoder_enum,
3455                                  uint32_t supported_device,
3456                                  u16 caps)
3457 {
3458         struct drm_device *dev = adev->ddev;
3459         struct drm_encoder *encoder;
3460         struct amdgpu_encoder *amdgpu_encoder;
3461
3462         /* see if we already added it */
3463         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3464                 amdgpu_encoder = to_amdgpu_encoder(encoder);
3465                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3466                         amdgpu_encoder->devices |= supported_device;
3467                         return;
3468                 }
3470         }
3471
3472         /* add a new one */
3473         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3474         if (!amdgpu_encoder)
3475                 return;
3476
3477         encoder = &amdgpu_encoder->base;
3478         switch (adev->mode_info.num_crtc) {
3479         case 1:
3480                 encoder->possible_crtcs = 0x1;
3481                 break;
3482         case 2:
3483         default:
3484                 encoder->possible_crtcs = 0x3;
3485                 break;
3486         case 4:
3487                 encoder->possible_crtcs = 0xf;
3488                 break;
3489         case 6:
3490                 encoder->possible_crtcs = 0x3f;
3491                 break;
3492         }
3493
3494         amdgpu_encoder->enc_priv = NULL;
3495
3496         amdgpu_encoder->encoder_enum = encoder_enum;
3497         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3498         amdgpu_encoder->devices = supported_device;
3499         amdgpu_encoder->rmx_type = RMX_OFF;
3500         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3501         amdgpu_encoder->is_ext_encoder = false;
3502         amdgpu_encoder->caps = caps;
3503
3504         switch (amdgpu_encoder->encoder_id) {
3505         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3506         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3507                 drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3508                                  DRM_MODE_ENCODER_DAC, NULL);
3509                 drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
3510                 break;
3511         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3512         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3513         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3514         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3515         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3516                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3517                         amdgpu_encoder->rmx_type = RMX_FULL;
3518                         drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3519                                          DRM_MODE_ENCODER_LVDS, NULL);
3520                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3521                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3522                         drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3523                                          DRM_MODE_ENCODER_DAC, NULL);
3524                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3525                 } else {
3526                         drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3527                                          DRM_MODE_ENCODER_TMDS, NULL);
3528                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3529                 }
3530                 drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
3531                 break;
3532         case ENCODER_OBJECT_ID_SI170B:
3533         case ENCODER_OBJECT_ID_CH7303:
3534         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3535         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3536         case ENCODER_OBJECT_ID_TITFP513:
3537         case ENCODER_OBJECT_ID_VT1623:
3538         case ENCODER_OBJECT_ID_HDMI_SI1930:
3539         case ENCODER_OBJECT_ID_TRAVIS:
3540         case ENCODER_OBJECT_ID_NUTMEG:
3541                 /* these are handled by the primary encoders */
3542                 amdgpu_encoder->is_ext_encoder = true;
3543                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3544                         drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3545                                          DRM_MODE_ENCODER_LVDS, NULL);
3546                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3547                         drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3548                                          DRM_MODE_ENCODER_DAC, NULL);
3549                 else
3550                         drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
3551                                          DRM_MODE_ENCODER_TMDS, NULL);
3552                 drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
3553                 break;
3554         }
3555 }
3556
3557 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
3558         .bandwidth_update = &dce_v10_0_bandwidth_update,
3559         .vblank_get_counter = &dce_v10_0_vblank_get_counter,
3560         .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3561         .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3562         .hpd_sense = &dce_v10_0_hpd_sense,
3563         .hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
3564         .hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
3565         .page_flip = &dce_v10_0_page_flip,
3566         .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
3567         .add_encoder = &dce_v10_0_encoder_add,
3568         .add_connector = &amdgpu_connector_add,
3569 };
3570
3571 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
3572 {
3573         adev->mode_info.funcs = &dce_v10_0_display_funcs;
3574 }
3575
3576 static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
3577         .set = dce_v10_0_set_crtc_irq_state,
3578         .process = dce_v10_0_crtc_irq,
3579 };
3580
3581 static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
3582         .set = dce_v10_0_set_pageflip_irq_state,
3583         .process = dce_v10_0_pageflip_irq,
3584 };
3585
3586 static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
3587         .set = dce_v10_0_set_hpd_irq_state,
3588         .process = dce_v10_0_hpd_irq,
3589 };
3590
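/**
 * dce_v10_0_set_irq_funcs - wire up the DCE interrupt sources
 *
 * @adev: amdgpu_device pointer
 *
 * Sets the number of interrupt types and the set/process callbacks for
 * the crtc, pageflip and hpd interrupt sources.
 */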
3591 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
3592 {
3593         if (adev->mode_info.num_crtc > 0)
3594                 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3595         else
3596                 adev->crtc_irq.num_types = 0;
3597         adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
3598
3599         adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3600         adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
3601
3602         adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3603         adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
3604 }
3605
3606 const struct amdgpu_ip_block_version dce_v10_0_ip_block =
3607 {
3608         .type = AMD_IP_BLOCK_TYPE_DCE,
3609         .major = 10,
3610         .minor = 0,
3611         .rev = 0,
3612         .funcs = &dce_v10_0_ip_funcs,
3613 };
3614
3615 const struct amdgpu_ip_block_version dce_v10_1_ip_block =
3616 {
3617         .type = AMD_IP_BLOCK_TYPE_DCE,
3618         .major = 10,
3619         .minor = 1,
3620         .rev = 0,
3621         .funcs = &dce_v10_0_ip_funcs,
3622 };