drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

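/*
 * Map the dongle type reported in the link's DPCD caps to the DRM
 * subconnector type exposed to userspace via the "subconnector" property.
 */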
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

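/*
 * Refresh the DRM dp_subconnector_property of a DisplayPort connector to
 * match the dongle currently detected on the link, if a sink is present.
 */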
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the current vertical blank count for a CRTC
 * @adev: [in] desired amdgpu device
 * @crtc: [in] index of the CRTC to get the counter from
 *
 * Return: the vblank counter of the CRTC's stream, or 0 if @crtc is out of
 * range or has no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

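/*
 * Read back the current scanout position of a CRTC from its DC stream.
 * *position packs the vertical position in the low 16 bits and the
 * horizontal position in the high 16 bits; *vbl packs vblank start (low)
 * and end (high) the same way, mirroring the legacy register layout.
 */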
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (acrtc->dm_irq_params.stream == NULL) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

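/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance. Falls back to the first CRTC, with a warning, when called with
 * the invalid instance -1.
 */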
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
		return true;
	else
		return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

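/*
 * Handle the VUPDATE interrupt, which fires after the end of front-porch.
 * In VRR mode this is where core vblank handling and, on pre-DCE12 ASICs,
 * below-the-range (BTR) freesync processing take place.
 */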
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after the end of
		 * front-porch in VRR mode, as vblank timestamping only gives
		 * valid results once scanout is past the front-porch. This
		 * also delivers page-flip completion events that were queued
		 * to us if a pageflip happened inside the front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r) {
			DRM_ERROR("DM: Failed to initialize FBC\n");
		} else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

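/*
 * DRM audio component callback: copy the cached ELD (EDID-Like Data) of
 * the connector whose audio instance matches @port into @buf, so the HDA
 * driver can configure the audio stream for the attached display.
 */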
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					  int pipe, bool *enabled,
					  unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD: idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
				       struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

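/*
 * Bring up the DMUB (Display Microcontroller Unit B) service: copy the
 * firmware and VBIOS into framebuffer memory, program the hardware
 * parameters, wait for the firmware to auto-load, and hook the resulting
 * DMUB server into DC. Returns 0 when DMUB is simply unsupported.
 */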
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
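/*
 * Translate the GMC view of system memory (framebuffer aperture, AGP
 * aperture and GART page table) into the dc_phy_addr_space_config that
 * DC uses to program the DCN MMHUB system context.
 */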
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = 0;
}

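/*
 * Deferred work that tracks how many CRTCs currently have vblank
 * interrupts enabled, and allows DC idle optimizations (MALL stutter)
 * only while no vblank IRQ is active.
 */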
static void event_mall_stutter(struct work_struct *work)
{
	struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

	DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	mutex_unlock(&dm->dc_lock);
}

static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	struct vblank_workqueue *vblank_work;
	int i = 0;

	vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vblank_work)) {
		kfree(vblank_work);
		return NULL;
	}

	for (i = 0; i < max_caps; i++)
		INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);

	return vblank_work;
}
#endif
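
/*
 * Core DM bring-up: initialize IRQ support, create the CGS device and the
 * DC instance, apply feature/debug mask overrides, initialize DMUB hardware
 * and the freesync/HDCP helpers, then build the DRM mode objects and
 * vblank support on top.
 */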
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);
#if defined(CONFIG_DRM_AMD_DC_DCN)
	spin_lock_init(&adev->dm.vblank_lock);
#endif

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	INIT_LIST_HEAD(&adev->dm.da_list);
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		/* Call the DC init_memory func */
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module)
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.dc->caps.max_links > 0) {
		adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);

		if (!adev->dm.vblank_workqueue)
			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
	}
#endif

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

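/*
 * Tear down everything amdgpu_dm_init() created, in reverse order: MST
 * encoders, audio, the DRM device, the optional secure-display, HDCP and
 * vblank work items, the DMUB service and, finally, DC itself.
 */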
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	if (adev->dm.crc_rd_wrk) {
		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
		kfree(adev->dm.crc_rd_wrk);
		adev->dm.crc_rd_wrk = NULL;
	}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
	if (adev->dm.vblank_workqueue) {
		adev->dm.vblank_workqueue->dm = NULL;
		kfree(adev->dm.vblank_workqueue);
		adev->dm.vblank_workqueue = NULL;
	}
#endif

	/* adev->dm.dc may still be NULL if we get here from a failed init */
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

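/*
 * Request and validate the optional DMCU (Display MicroController Unit)
 * firmware for ASICs that need it, and register its ERAM and interrupt
 * vector regions with the PSP firmware loader.
 */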
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
		    ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	dm_write_reg(adev->dm.dc->ctx, address, value);
}

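/*
 * Software-side DMUB setup: pick the firmware image for the ASIC, request
 * and validate it, create the dmub_srv instance with register accessors,
 * and compute the framebuffer regions the firmware will need.
 */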
1398 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1399 {
1400         struct dmub_srv_create_params create_params;
1401         struct dmub_srv_region_params region_params;
1402         struct dmub_srv_region_info region_info;
1403         struct dmub_srv_fb_params fb_params;
1404         struct dmub_srv_fb_info *fb_info;
1405         struct dmub_srv *dmub_srv;
1406         const struct dmcub_firmware_header_v1_0 *hdr;
1407         const char *fw_name_dmub;
1408         enum dmub_asic dmub_asic;
1409         enum dmub_status status;
1410         int r;
1411
1412         switch (adev->asic_type) {
1413         case CHIP_RENOIR:
1414                 dmub_asic = DMUB_ASIC_DCN21;
1415                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1416                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1417                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1418                 break;
1419         case CHIP_SIENNA_CICHLID:
1420                 dmub_asic = DMUB_ASIC_DCN30;
1421                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1422                 break;
1423         case CHIP_NAVY_FLOUNDER:
1424                 dmub_asic = DMUB_ASIC_DCN30;
1425                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1426                 break;
1427         case CHIP_VANGOGH:
1428                 dmub_asic = DMUB_ASIC_DCN301;
1429                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1430                 break;
1431         case CHIP_DIMGREY_CAVEFISH:
1432                 dmub_asic = DMUB_ASIC_DCN302;
1433                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1434                 break;
1436         default:
1437                 /* ASIC doesn't support DMUB. */
1438                 return 0;
1439         }
1440
1441         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1442         if (r) {
1443                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1444                 return 0;
1445         }
1446
1447         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1448         if (r) {
1449                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1450                 return 0;
1451         }
1452
1453         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1454         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1455
1456         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1457                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1458                         AMDGPU_UCODE_ID_DMCUB;
1459                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1460                         adev->dm.dmub_fw;
1461                 adev->firmware.fw_size +=
1462                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1463
1464                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1465                          adev->dm.dmcub_fw_version);
1466         }
1468
1469         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1470         dmub_srv = adev->dm.dmub_srv;
1471
1472         if (!dmub_srv) {
1473                 DRM_ERROR("Failed to allocate DMUB service!\n");
1474                 return -ENOMEM;
1475         }
1476
1477         memset(&create_params, 0, sizeof(create_params));
1478         create_params.user_ctx = adev;
1479         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1480         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1481         create_params.asic = dmub_asic;
1482
1483         /* Create the DMUB service. */
1484         status = dmub_srv_create(dmub_srv, &create_params);
1485         if (status != DMUB_STATUS_OK) {
1486                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1487                 return -EINVAL;
1488         }
1489
1490         /* Calculate the size of all the regions for the DMUB service. */
1491         memset(&region_params, 0, sizeof(region_params));
1492
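        /* The firmware blob wraps the instruction constants in a PSP header
         * and footer; exclude those bytes from the region handed to DMUB.
         */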
1493         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1494                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1495         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1496         region_params.vbios_size = adev->bios_size;
1497         region_params.fw_bss_data = region_params.bss_data_size ?
1498                 adev->dm.dmub_fw->data +
1499                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1500                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1501         region_params.fw_inst_const =
1502                 adev->dm.dmub_fw->data +
1503                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1504                 PSP_HEADER_BYTES;
1505
1506         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1507                                            &region_info);
1508
1509         if (status != DMUB_STATUS_OK) {
1510                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1511                 return -EINVAL;
1512         }
1513
1514         /*
1515          * Allocate a framebuffer based on the total size of all the regions.
1516          * TODO: Move this into GART.
1517          */
1518         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1519                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1520                                     &adev->dm.dmub_bo_gpu_addr,
1521                                     &adev->dm.dmub_bo_cpu_addr);
1522         if (r)
1523                 return r;
1524
1525         /* Rebase the regions on the framebuffer address. */
1526         memset(&fb_params, 0, sizeof(fb_params));
1527         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1528         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1529         fb_params.region_info = &region_info;
1530
1531         adev->dm.dmub_fb_info =
1532                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1533         fb_info = adev->dm.dmub_fb_info;
1534
1535         if (!fb_info) {
1536                 DRM_ERROR(
1537                         "Failed to allocate framebuffer info for DMUB service!\n");
1538                 return -ENOMEM;
1539         }
1540
1541         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1542         if (status != DMUB_STATUS_OK) {
1543                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1544                 return -EINVAL;
1545         }
1546
1547         return 0;
1548 }
1549
1550 static int dm_sw_init(void *handle)
1551 {
1552         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1553         int r;
1554
1555         r = dm_dmub_sw_init(adev);
1556         if (r)
1557                 return r;
1558
1559         return load_dmcu_fw(adev);
1560 }
1561
1562 static int dm_sw_fini(void *handle)
1563 {
1564         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1565
1566         kfree(adev->dm.dmub_fb_info);
1567         adev->dm.dmub_fb_info = NULL;
1568
1569         if (adev->dm.dmub_srv) {
1570                 dmub_srv_destroy(adev->dm.dmub_srv);
1571                 adev->dm.dmub_srv = NULL;
1572         }
1573
1574         release_firmware(adev->dm.dmub_fw);
1575         adev->dm.dmub_fw = NULL;
1576
1577         release_firmware(adev->dm.fw_dmcu);
1578         adev->dm.fw_dmcu = NULL;
1579
1580         return 0;
1581 }
1582
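/* Start MST topology management on every connector whose DC link is an MST
 * branch with a usable AUX channel.
 */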
1583 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1584 {
1585         struct amdgpu_dm_connector *aconnector;
1586         struct drm_connector *connector;
1587         struct drm_connector_list_iter iter;
1588         int ret = 0;
1589
1590         drm_connector_list_iter_begin(dev, &iter);
1591         drm_for_each_connector_iter(connector, &iter) {
1592                 aconnector = to_amdgpu_dm_connector(connector);
1593                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1594                     aconnector->mst_mgr.aux) {
1595                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1596                                          aconnector,
1597                                          aconnector->base.base.id);
1598
1599                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1600                         if (ret < 0) {
1601                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1602                                 aconnector->dc_link->type =
1603                                         dc_connection_single;
1604                                 break;
1605                         }
1606                 }
1607         }
1608         drm_connector_list_iter_end(&iter);
1609
1610         return ret;
1611 }
1612
1613 static int dm_late_init(void *handle)
1614 {
1615         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1616
1617         struct dmcu_iram_parameters params;
1618         unsigned int linear_lut[16];
1619         int i;
1620         struct dmcu *dmcu = NULL;
1621         bool ret = true;
1622
1623         dmcu = adev->dm.dc->res_pool->dmcu;
1624
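        /* Build a 16-point linear backlight LUT spanning 0x0000..0xFFFF. */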
1625         for (i = 0; i < 16; i++)
1626                 linear_lut[i] = 0xFFFF * i / 15;
1627
1628         params.set = 0;
1629         params.backlight_ramping_start = 0xCCCC;
1630         params.backlight_ramping_reduction = 0xCCCCCCCC;
1631         params.backlight_lut_array_size = 16;
1632         params.backlight_lut_array = linear_lut;
1633
1634         /* Minimum backlight level after ABM reduction; don't allow below 1%:
1635          * 0xFFFF * 0.01 = 0x28F
1636          */
1637         params.min_abm_backlight = 0x28F;
1638
1639         /* When ABM is implemented on DMCUB the dmcu object will be NULL;
1640          * ABM 2.4 and up live on DMCUB, so configure ABM through the DMUB
1641          * service in that case.
1642          */
1643         if (dmcu)
1644                 ret = dmcu_load_iram(dmcu, params);
1645         else if (adev->dm.dc->ctx->dmub_srv)
1646                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1647
1648         if (!ret)
1649                 return -EINVAL;
1650
1651         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1652 }
1653
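/* Suspend or resume the MST topology manager of every MST root connector.
 * If a manager fails to resume, tear MST down on that link and raise a
 * hotplug event so userspace re-probes.
 */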
1654 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1655 {
1656         struct amdgpu_dm_connector *aconnector;
1657         struct drm_connector *connector;
1658         struct drm_connector_list_iter iter;
1659         struct drm_dp_mst_topology_mgr *mgr;
1660         int ret;
1661         bool need_hotplug = false;
1662
1663         drm_connector_list_iter_begin(dev, &iter);
1664         drm_for_each_connector_iter(connector, &iter) {
1665                 aconnector = to_amdgpu_dm_connector(connector);
1666                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1667                     aconnector->mst_port)
1668                         continue;
1669
1670                 mgr = &aconnector->mst_mgr;
1671
1672                 if (suspend) {
1673                         drm_dp_mst_topology_mgr_suspend(mgr);
1674                 } else {
1675                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1676                         if (ret < 0) {
1677                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1678                                 need_hotplug = true;
1679                         }
1680                 }
1681         }
1682         drm_connector_list_iter_end(&iter);
1683
1684         if (need_hotplug)
1685                 drm_kms_helper_hotplug_event(dev);
1686 }
1687
1688 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1689 {
1690         struct smu_context *smu = &adev->smu;
1691         int ret = 0;
1692
1693         if (!is_support_sw_smu(adev))
1694                 return 0;
1695
1696         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1697          * depends on the Windows driver's dc implementation.
1698          * For Navi1x, the clock settings of the dcn watermarks are fixed and
1699          * should be passed to smu during boot up and on resume from S3.
1700          * Boot up: dc calculates the dcn watermark clock settings within
1701          * dc_create / dcn20_resource_construct, then the pplib functions
1702          * below pass the settings to smu:
1703          * smu_set_watermarks_for_clock_ranges
1704          * smu_set_watermarks_table
1705          * navi10_set_watermarks_table
1706          * smu_write_watermarks_table
1707          *
1708          * For Renoir, the clock settings of the dcn watermarks are also
1709          * fixed values. dc has implemented a different flow for the Windows
1710          * driver:
1711          * dc_hardware_init / dc_set_power_state
1712          * dcn10_init_hw
1713          * notify_wm_ranges
1714          * set_wm_ranges
1715          * -- Linux
1716          * smu_set_watermarks_for_clock_ranges
1717          * renoir_set_watermarks_table
1718          * smu_write_watermarks_table
1719          *
1720          * For Linux,
1721          * dc_hardware_init -> amdgpu_dm_init
1722          * dc_set_power_state --> dm_resume
1723          *
1724          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
1725          */
1726         switch (adev->asic_type) {
1727         case CHIP_NAVI10:
1728         case CHIP_NAVI14:
1729         case CHIP_NAVI12:
1730                 break;
1731         default:
1732                 return 0;
1733         }
1734
1735         ret = smu_write_watermarks_table(smu);
1736         if (ret) {
1737                 DRM_ERROR("Failed to update WMTABLE!\n");
1738                 return ret;
1739         }
1740
1741         return 0;
1742 }
1743
1744 /**
1745  * dm_hw_init() - Initialize DC device
1746  * @handle: The base driver device containing the amdgpu_dm device.
1747  *
1748  * Initialize the &struct amdgpu_display_manager device. This involves calling
1749  * the initializers of each DM component, then populating the struct with them.
1750  *
1751  * Although the function implies hardware initialization, both hardware and
1752  * software are initialized here. Splitting them out to their relevant init
1753  * hooks is a future TODO item.
1754  *
1755  * Some notable things that are initialized here:
1756  *
1757  * - Display Core, both software and hardware
1758  * - DC modules that we need (freesync and color management)
1759  * - DRM software states
1760  * - Interrupt sources and handlers
1761  * - Vblank support
1762  * - Debug FS entries, if enabled
1763  */
1764 static int dm_hw_init(void *handle)
1765 {
1766         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1767         /* Create DAL display manager */
1768         amdgpu_dm_init(adev);
1769         amdgpu_dm_hpd_init(adev);
1770
1771         return 0;
1772 }
1773
1774 /**
1775  * dm_hw_fini() - Teardown DC device
1776  * @handle: The base driver device containing the amdgpu_dm device.
1777  *
1778  * Teardown components within &struct amdgpu_display_manager that require
1779  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1780  * were loaded. Also flush IRQ workqueues and disable them.
1781  */
1782 static int dm_hw_fini(void *handle)
1783 {
1784         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1785
1786         amdgpu_dm_hpd_fini(adev);
1787
1788         amdgpu_dm_irq_fini(adev);
1789         amdgpu_dm_fini(adev);
1790         return 0;
1791 }
1792
1793
1794 static int dm_enable_vblank(struct drm_crtc *crtc);
1795 static void dm_disable_vblank(struct drm_crtc *crtc);
1796
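/* Enable or disable the pflip and vblank interrupts of every active CRTC in
 * the given DC state; used to quiesce display interrupts across a GPU reset.
 */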
1797 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1798                                  struct dc_state *state, bool enable)
1799 {
1800         enum dc_irq_source irq_source;
1801         struct amdgpu_crtc *acrtc;
1802         int rc = -EBUSY;
1803         int i = 0;
1804
1805         for (i = 0; i < state->stream_count; i++) {
1806                 acrtc = get_crtc_by_otg_inst(
1807                                 adev, state->stream_status[i].primary_otg_inst);
1808
1809                 if (acrtc && state->stream_status[i].plane_count != 0) {
1810                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1811                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1812                         DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
1813                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1814                         if (rc)
1815                                 DRM_WARN("Failed to %s pflip interrupts\n",
1816                                          enable ? "enable" : "disable");
1817
1818                         if (enable) {
1819                                 rc = dm_enable_vblank(&acrtc->base);
1820                                 if (rc)
1821                                         DRM_WARN("Failed to enable vblank interrupts\n");
1822                         } else {
1823                                 dm_disable_vblank(&acrtc->base);
1824                         }
1825
1826                 }
1827         }
1828
1829 }
1830
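/* Build and commit a copy of the current DC state with every stream (and its
 * planes) removed, leaving the hardware driving zero streams.
 */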
1831 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1832 {
1833         struct dc_state *context = NULL;
1834         enum dc_status res = DC_ERROR_UNEXPECTED;
1835         int i;
1836         struct dc_stream_state *del_streams[MAX_PIPES];
1837         int del_streams_count = 0;
1838
1839         memset(del_streams, 0, sizeof(del_streams));
1840
1841         context = dc_create_state(dc);
1842         if (context == NULL)
1843                 goto context_alloc_fail;
1844
1845         dc_resource_state_copy_construct_current(dc, context);
1846
1847         /* First remove from context all streams */
1848         for (i = 0; i < context->stream_count; i++) {
1849                 struct dc_stream_state *stream = context->streams[i];
1850
1851                 del_streams[del_streams_count++] = stream;
1852         }
1853
1854         /* Remove all planes for removed streams and then remove the streams */
1855         for (i = 0; i < del_streams_count; i++) {
1856                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1857                         res = DC_FAIL_DETACH_SURFACES;
1858                         goto fail;
1859                 }
1860
1861                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1862                 if (res != DC_OK)
1863                         goto fail;
1864         }
1865
1866
1867         res = dc_validate_global_state(dc, context, false);
1868
1869         if (res != DC_OK) {
1870                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1871                 goto fail;
1872         }
1873
1874         res = dc_commit_state(dc, context);
1875
1876 fail:
1877         dc_release_state(context);
1878
1879 context_alloc_fail:
1880         return res;
1881 }
1882
1883 static int dm_suspend(void *handle)
1884 {
1885         struct amdgpu_device *adev = handle;
1886         struct amdgpu_display_manager *dm = &adev->dm;
1887         int ret = 0;
1888
1889         if (amdgpu_in_reset(adev)) {
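                /* Hold dc_lock across the whole reset; dm_resume() drops it. */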
1890                 mutex_lock(&dm->dc_lock);
1891
1892 #if defined(CONFIG_DRM_AMD_DC_DCN)
1893                 dc_allow_idle_optimizations(adev->dm.dc, false);
1894 #endif
1895
1896                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1897
1898                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1899
1900                 amdgpu_dm_commit_zero_streams(dm->dc);
1901
1902                 amdgpu_dm_irq_suspend(adev);
1903
1904                 return ret;
1905         }
1906
1907 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
1908         amdgpu_dm_crtc_secure_display_suspend(adev);
1909 #endif
1910         WARN_ON(adev->dm.cached_state);
1911         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1912
1913         s3_handle_mst(adev_to_drm(adev), true);
1914
1915         amdgpu_dm_irq_suspend(adev);
1916
1917
1918         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1919
1920         return 0;
1921 }
1922
1923 static struct amdgpu_dm_connector *
1924 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1925                                              struct drm_crtc *crtc)
1926 {
1927         uint32_t i;
1928         struct drm_connector_state *new_con_state;
1929         struct drm_connector *connector;
1930         struct drm_crtc *crtc_from_state;
1931
1932         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1933                 crtc_from_state = new_con_state->crtc;
1934
1935                 if (crtc_from_state == crtc)
1936                         return to_amdgpu_dm_connector(connector);
1937         }
1938
1939         return NULL;
1940 }
1941
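/* Emulate link detection for a forced connector: derive sink capabilities
 * from the connector signal type, create a sink, and read the local EDID.
 */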
1942 static void emulated_link_detect(struct dc_link *link)
1943 {
1944         struct dc_sink_init_data sink_init_data = { 0 };
1945         struct display_sink_capability sink_caps = { 0 };
1946         enum dc_edid_status edid_status;
1947         struct dc_context *dc_ctx = link->ctx;
1948         struct dc_sink *sink = NULL;
1949         struct dc_sink *prev_sink = NULL;
1950
1951         link->type = dc_connection_none;
1952         prev_sink = link->local_sink;
1953
1954         if (prev_sink)
1955                 dc_sink_release(prev_sink);
1956
1957         switch (link->connector_signal) {
1958         case SIGNAL_TYPE_HDMI_TYPE_A: {
1959                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1960                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1961                 break;
1962         }
1963
1964         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1965                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1966                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1967                 break;
1968         }
1969
1970         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1971                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1972                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1973                 break;
1974         }
1975
1976         case SIGNAL_TYPE_LVDS: {
1977                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1978                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1979                 break;
1980         }
1981
1982         case SIGNAL_TYPE_EDP: {
1983                 sink_caps.transaction_type =
1984                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1985                 sink_caps.signal = SIGNAL_TYPE_EDP;
1986                 break;
1987         }
1988
1989         case SIGNAL_TYPE_DISPLAY_PORT: {
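                /* Emulated DP sinks are created with a virtual signal type. */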
1990                 sink_caps.transaction_type =
1991                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1992                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1993                 break;
1994         }
1995
1996         default:
1997                 DC_ERROR("Invalid connector type! signal:%d\n",
1998                         link->connector_signal);
1999                 return;
2000         }
2001
2002         sink_init_data.link = link;
2003         sink_init_data.sink_signal = sink_caps.signal;
2004
2005         sink = dc_sink_create(&sink_init_data);
2006         if (!sink) {
2007                 DC_ERROR("Failed to create sink!\n");
2008                 return;
2009         }
2010
2011         /* dc_sink_create returns a new reference */
2012         link->local_sink = sink;
2013
2014         edid_status = dm_helpers_read_local_edid(
2015                         link->ctx,
2016                         link,
2017                         sink);
2018
2019         if (edid_status != EDID_OK)
2020                 DC_ERROR("Failed to read EDID\n");
2021
2022 }
2023
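/* After a GPU reset, re-commit every stream in the cached DC state with
 * full-update flags set on all of its surfaces.
 */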
2024 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2025                                      struct amdgpu_display_manager *dm)
2026 {
2027         struct {
2028                 struct dc_surface_update surface_updates[MAX_SURFACES];
2029                 struct dc_plane_info plane_infos[MAX_SURFACES];
2030                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2031                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2032                 struct dc_stream_update stream_update;
2033         } *bundle;
2034         int k, m;
2035
2036         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2037
2038         if (!bundle) {
2039                 dm_error("Failed to allocate update bundle\n");
2040                 goto cleanup;
2041         }
2042
2043         for (k = 0; k < dc_state->stream_count; k++) {
2044                 bundle->stream_update.stream = dc_state->streams[k];
2045
2046                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2047                         bundle->surface_updates[m].surface =
2048                                 dc_state->stream_status[k].plane_states[m];
2049                         bundle->surface_updates[m].surface->force_full_update =
2050                                 true;
2051                 }
2052                 dc_commit_updates_for_stream(
2053                         dm->dc, bundle->surface_updates,
2054                         dc_state->stream_status[k].plane_count,
2055                         dc_state->streams[k], &bundle->stream_update, dc_state);
2056         }
2057
2058 cleanup:
2059         kfree(bundle);
2060
2061         return;
2062 }
2063
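/* Find the stream currently driven over the link and commit a dpms_off
 * update for it, blanking the output.
 */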
2064 static void dm_set_dpms_off(struct dc_link *link)
2065 {
2066         struct dc_stream_state *stream_state;
2067         struct amdgpu_dm_connector *aconnector = link->priv;
2068         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2069         struct dc_stream_update stream_update;
2070         bool dpms_off = true;
2071
2072         memset(&stream_update, 0, sizeof(stream_update));
2073         stream_update.dpms_off = &dpms_off;
2074
2075         mutex_lock(&adev->dm.dc_lock);
2076         stream_state = dc_stream_find_from_link(link);
2077
2078         if (stream_state == NULL) {
2079                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2080                 mutex_unlock(&adev->dm.dc_lock);
2081                 return;
2082         }
2083
2084         stream_update.stream = stream_state;
2085         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2086                                      stream_state, &stream_update,
2087                                      stream_state->ctx->dc->current_state);
2088         mutex_unlock(&adev->dm.dc_lock);
2089 }
2090
2091 static int dm_resume(void *handle)
2092 {
2093         struct amdgpu_device *adev = handle;
2094         struct drm_device *ddev = adev_to_drm(adev);
2095         struct amdgpu_display_manager *dm = &adev->dm;
2096         struct amdgpu_dm_connector *aconnector;
2097         struct drm_connector *connector;
2098         struct drm_connector_list_iter iter;
2099         struct drm_crtc *crtc;
2100         struct drm_crtc_state *new_crtc_state;
2101         struct dm_crtc_state *dm_new_crtc_state;
2102         struct drm_plane *plane;
2103         struct drm_plane_state *new_plane_state;
2104         struct dm_plane_state *dm_new_plane_state;
2105         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2106         enum dc_connection_type new_connection_type = dc_connection_none;
2107         struct dc_state *dc_state;
2108         int i, r, j;
2109
2110         if (amdgpu_in_reset(adev)) {
2111                 dc_state = dm->cached_dc_state;
2112
2113                 r = dm_dmub_hw_init(adev);
2114                 if (r)
2115                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2116
2117                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2118                 dc_resume(dm->dc);
2119
2120                 amdgpu_dm_irq_resume_early(adev);
2121
2122                 for (i = 0; i < dc_state->stream_count; i++) {
2123                         dc_state->streams[i]->mode_changed = true;
2124                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2125                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2126                                         = 0xffffffff;
2127                         }
2128                 }
2129
2130                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2131
2132                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2133
2134                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2135
2136                 dc_release_state(dm->cached_dc_state);
2137                 dm->cached_dc_state = NULL;
2138
2139                 amdgpu_dm_irq_resume_late(adev);
2140
2141                 mutex_unlock(&dm->dc_lock);
2142
2143                 return 0;
2144         }
2145         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2146         dc_release_state(dm_state->context);
2147         dm_state->context = dc_create_state(dm->dc);
2148         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2149         dc_resource_state_construct(dm->dc, dm_state->context);
2150
2151         /* Before powering on DC we need to re-initialize DMUB. */
2152         r = dm_dmub_hw_init(adev);
2153         if (r)
2154                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2155
2156         /* power on hardware */
2157         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2158
2159         /* program HPD filter */
2160         dc_resume(dm->dc);
2161
2162         /*
2163          * Enable HPD Rx IRQ early; this must be done before setting the mode,
2164          * since short-pulse interrupts are used for MST.
2165          */
2166         amdgpu_dm_irq_resume_early(adev);
2167
2168         /* On resume we need to rewrite the MSTM control bits to enable MST */
2169         s3_handle_mst(ddev, false);
2170
2171         /* Do detection */
2172         drm_connector_list_iter_begin(ddev, &iter);
2173         drm_for_each_connector_iter(connector, &iter) {
2174                 aconnector = to_amdgpu_dm_connector(connector);
2175
2176                 /*
2177                  * Skip connectors already created for MST ports when
2178                  * traversing; the MST manager handles those.
2179                  */
2180                 if (aconnector->mst_port)
2181                         continue;
2182
2183                 mutex_lock(&aconnector->hpd_lock);
2184                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2185                         DRM_ERROR("KMS: Failed to detect connector\n");
2186
2187                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2188                         emulated_link_detect(aconnector->dc_link);
2189                 else
2190                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2191
2192                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2193                         aconnector->fake_enable = false;
2194
2195                 if (aconnector->dc_sink)
2196                         dc_sink_release(aconnector->dc_sink);
2197                 aconnector->dc_sink = NULL;
2198                 amdgpu_dm_update_connector_after_detect(aconnector);
2199                 mutex_unlock(&aconnector->hpd_lock);
2200         }
2201         drm_connector_list_iter_end(&iter);
2202
2203         /* Force mode set in atomic commit */
2204         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2205                 new_crtc_state->active_changed = true;
2206
2207         /*
2208          * atomic_check is expected to create the dc states. We need to release
2209          * them here, since they were duplicated as part of the suspend
2210          * procedure.
2211          */
2212         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2213                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2214                 if (dm_new_crtc_state->stream) {
2215                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2216                         dc_stream_release(dm_new_crtc_state->stream);
2217                         dm_new_crtc_state->stream = NULL;
2218                 }
2219         }
2220
2221         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2222                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2223                 if (dm_new_plane_state->dc_state) {
2224                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2225                         dc_plane_state_release(dm_new_plane_state->dc_state);
2226                         dm_new_plane_state->dc_state = NULL;
2227                 }
2228         }
2229
2230         drm_atomic_helper_resume(ddev, dm->cached_state);
2231
2232         dm->cached_state = NULL;
2233
2234 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
2235         amdgpu_dm_crtc_secure_display_resume(adev);
2236 #endif
2237
2238         amdgpu_dm_irq_resume_late(adev);
2239
2240         amdgpu_dm_smu_write_watermarks_table(adev);
2241
2242         return 0;
2243 }
2244
2245 /**
2246  * DOC: DM Lifecycle
2247  *
2248  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2249  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2250  * the base driver's device list to be initialized and torn down accordingly.
2251  *
2252  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2253  */
2254
2255 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2256         .name = "dm",
2257         .early_init = dm_early_init,
2258         .late_init = dm_late_init,
2259         .sw_init = dm_sw_init,
2260         .sw_fini = dm_sw_fini,
2261         .hw_init = dm_hw_init,
2262         .hw_fini = dm_hw_fini,
2263         .suspend = dm_suspend,
2264         .resume = dm_resume,
2265         .is_idle = dm_is_idle,
2266         .wait_for_idle = dm_wait_for_idle,
2267         .check_soft_reset = dm_check_soft_reset,
2268         .soft_reset = dm_soft_reset,
2269         .set_clockgating_state = dm_set_clockgating_state,
2270         .set_powergating_state = dm_set_powergating_state,
2271 };
2272
2273 const struct amdgpu_ip_block_version dm_ip_block =
2274 {
2275         .type = AMD_IP_BLOCK_TYPE_DCE,
2276         .major = 1,
2277         .minor = 0,
2278         .rev = 0,
2279         .funcs = &amdgpu_dm_funcs,
2280 };
2281
2282
2283 /**
2284  * DOC: atomic
2285  *
2286  * *WIP*
2287  */
2288
2289 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2290         .fb_create = amdgpu_display_user_framebuffer_create,
2291         .get_format_info = amd_get_format_info,
2292         .output_poll_changed = drm_fb_helper_output_poll_changed,
2293         .atomic_check = amdgpu_dm_atomic_check,
2294         .atomic_commit = drm_atomic_helper_commit,
2295 };
2296
2297 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2298         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2299 };
2300
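/* Derive eDP AUX backlight capabilities and the min/max input signal range
 * from the sink's extended caps and HDR metadata (max/min content light level).
 */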
2301 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2302 {
2303         u32 max_cll, min_cll, max, min, q, r;
2304         struct amdgpu_dm_backlight_caps *caps;
2305         struct amdgpu_display_manager *dm;
2306         struct drm_connector *conn_base;
2307         struct amdgpu_device *adev;
2308         struct dc_link *link = NULL;
2309         static const u8 pre_computed_values[] = {
2310                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2311                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2312
2313         if (!aconnector || !aconnector->dc_link)
2314                 return;
2315
2316         link = aconnector->dc_link;
2317         if (link->connector_signal != SIGNAL_TYPE_EDP)
2318                 return;
2319
2320         conn_base = &aconnector->base;
2321         adev = drm_to_adev(conn_base->dev);
2322         dm = &adev->dm;
2323         caps = &dm->backlight_caps;
2324         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2325         caps->aux_support = false;
2326         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2327         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2328
2329         if (caps->ext_caps->bits.oled == 1 ||
2330             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2331             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2332                 caps->aux_support = true;
2333
2334         if (amdgpu_backlight == 0)
2335                 caps->aux_support = false;
2336         else if (amdgpu_backlight == 1)
2337                 caps->aux_support = true;
2338
2339         /* From the specification (CTA-861-G), the maximum luminance is
2340          * calculated as:
2341          *      Luminance = 50*2**(CV/32)
2342          * where CV is a one-byte value.
2343          * Evaluating this expression would require floating-point precision;
2344          * to avoid that complexity, we exploit the fact that CV is divided
2345          * by a constant. By Euclid's division algorithm, CV can be written
2346          * as CV = 32*q + r. Substituting CV in the luminance expression
2347          * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute the
2348          * values of 2**(r/32). The table was generated with the Ruby line:
2349          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2350          * and the results can be checked against pre_computed_values.
2351          */
2354         q = max_cll >> 5;
2355         r = max_cll % 32;
2356         max = (1 << q) * pre_computed_values[r];
2357
2358         /* min luminance: maxLum * (CV/255)^2 / 100, computed with
2359          * integer rounding to avoid floating point.
2360          */
2361         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2361
2362         caps->aux_max_input_signal = max;
2363         caps->aux_min_input_signal = min;
2364 }
2365
2366 void amdgpu_dm_update_connector_after_detect(
2367                 struct amdgpu_dm_connector *aconnector)
2368 {
2369         struct drm_connector *connector = &aconnector->base;
2370         struct drm_device *dev = connector->dev;
2371         struct dc_sink *sink;
2372
2373         /* MST is handled by the drm_mst framework */
2374         if (aconnector->mst_mgr.mst_state)
2375                 return;
2376
2377         sink = aconnector->dc_link->local_sink;
2378         if (sink)
2379                 dc_sink_retain(sink);
2380
2381         /*
2382          * An EDID-managed connector gets its first update only in the mode_valid
2383          * hook; the connector sink is then set to either the emulated or the
2384          * physical sink, depending on link status. Skip if already done during boot.
2385          */
2386         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2387                         && aconnector->dc_em_sink) {
2388
2389                 /*
2390                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2391                  * fake a stream, because connector->sink is NULL on resume.
2392                  */
2393                 mutex_lock(&dev->mode_config.mutex);
2394
2395                 if (sink) {
2396                         if (aconnector->dc_sink) {
2397                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2398                                 /*
2399                                  * The retain and release below bump the sink's
2400                                  * refcount, because the link no longer points to
2401                                  * it after disconnect; otherwise the next CRTC-to-
2402                                  * connector reshuffle by the UMD would trigger an
2403                                  * unwanted dc_sink release.
2404                                  */
2404                                 dc_sink_release(aconnector->dc_sink);
2405                         }
2406                         aconnector->dc_sink = sink;
2407                         dc_sink_retain(aconnector->dc_sink);
2408                         amdgpu_dm_update_freesync_caps(connector,
2409                                         aconnector->edid);
2410                 } else {
2411                         amdgpu_dm_update_freesync_caps(connector, NULL);
2412                         if (!aconnector->dc_sink) {
2413                                 aconnector->dc_sink = aconnector->dc_em_sink;
2414                                 dc_sink_retain(aconnector->dc_sink);
2415                         }
2416                 }
2417
2418                 mutex_unlock(&dev->mode_config.mutex);
2419
2420                 if (sink)
2421                         dc_sink_release(sink);
2422                 return;
2423         }
2424
2425         /*
2426          * TODO: temporary guard until a proper fix is found.
2427          * If this sink is an MST sink, we should not do anything.
2428          */
2429         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2430                 dc_sink_release(sink);
2431                 return;
2432         }
2433
2434         if (aconnector->dc_sink == sink) {
2435                 /*
2436                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
2437                  * Do nothing!!
2438                  */
2439                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2440                                 aconnector->connector_id);
2441                 if (sink)
2442                         dc_sink_release(sink);
2443                 return;
2444         }
2445
2446         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2447                 aconnector->connector_id, aconnector->dc_sink, sink);
2448
2449         mutex_lock(&dev->mode_config.mutex);
2450
2451         /*
2452          * 1. Update status of the drm connector
2453          * 2. Send an event and let userspace tell us what to do
2454          */
2455         if (sink) {
2456                 /*
2457                  * TODO: check if we still need the S3 mode update workaround.
2458                  * If yes, put it here.
2459                  */
2460                 if (aconnector->dc_sink) {
2461                         amdgpu_dm_update_freesync_caps(connector, NULL);
2462                         dc_sink_release(aconnector->dc_sink);
2463                 }
2464
2465                 aconnector->dc_sink = sink;
2466                 dc_sink_retain(aconnector->dc_sink);
2467                 if (sink->dc_edid.length == 0) {
2468                         aconnector->edid = NULL;
2469                         if (aconnector->dc_link->aux_mode) {
2470                                 drm_dp_cec_unset_edid(
2471                                         &aconnector->dm_dp_aux.aux);
2472                         }
2473                 } else {
2474                         aconnector->edid =
2475                                 (struct edid *)sink->dc_edid.raw_edid;
2476
2477                         drm_connector_update_edid_property(connector,
2478                                                            aconnector->edid);
2479                         if (aconnector->dc_link->aux_mode)
2480                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2481                                                     aconnector->edid);
2482                 }
2483
2484                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2485                 update_connector_ext_caps(aconnector);
2486         } else {
2487                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2488                 amdgpu_dm_update_freesync_caps(connector, NULL);
2489                 drm_connector_update_edid_property(connector, NULL);
2490                 aconnector->num_modes = 0;
2491                 dc_sink_release(aconnector->dc_sink);
2492                 aconnector->dc_sink = NULL;
2493                 aconnector->edid = NULL;
2494 #ifdef CONFIG_DRM_AMD_DC_HDCP
2495                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2496                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2497                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2498 #endif
2499         }
2500
2501         mutex_unlock(&dev->mode_config.mutex);
2502
2503         update_subconnector_property(aconnector);
2504
2505         if (sink)
2506                 dc_sink_release(sink);
2507 }
2508
2509 static void handle_hpd_irq(void *param)
2510 {
2511         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2512         struct drm_connector *connector = &aconnector->base;
2513         struct drm_device *dev = connector->dev;
2514         enum dc_connection_type new_connection_type = dc_connection_none;
2515 #ifdef CONFIG_DRM_AMD_DC_HDCP
2516         struct amdgpu_device *adev = drm_to_adev(dev);
2517         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2518 #endif
2519
2520         /*
2521          * On failure, or for MST, there is no need to update the connector
2522          * status or notify the OS, since MST handles this in its own context.
2523          */
2524         mutex_lock(&aconnector->hpd_lock);
2525
2526 #ifdef CONFIG_DRM_AMD_DC_HDCP
2527         if (adev->dm.hdcp_workqueue) {
2528                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2529                 dm_con_state->update_hdcp = true;
2530         }
2531 #endif
2532         if (aconnector->fake_enable)
2533                 aconnector->fake_enable = false;
2534
2535         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2536                 DRM_ERROR("KMS: Failed to detect connector\n");
2537
2538         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2539                 emulated_link_detect(aconnector->dc_link);
2540
2541
2542                 drm_modeset_lock_all(dev);
2543                 dm_restore_drm_connector_state(dev, connector);
2544                 drm_modeset_unlock_all(dev);
2545
2546                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2547                         drm_kms_helper_hotplug_event(dev);
2548
2549         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2550                 if (new_connection_type == dc_connection_none &&
2551                     aconnector->dc_link->type == dc_connection_none)
2552                         dm_set_dpms_off(aconnector->dc_link);
2553
2554                 amdgpu_dm_update_connector_after_detect(aconnector);
2555
2556                 drm_modeset_lock_all(dev);
2557                 dm_restore_drm_connector_state(dev, connector);
2558                 drm_modeset_unlock_all(dev);
2559
2560                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2561                         drm_kms_helper_hotplug_event(dev);
2562         }
2563         mutex_unlock(&aconnector->hpd_lock);
2564
2565 }
2566
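/* Drain MST ESI interrupts: read the ESI bytes from DPCD, let the MST manager
 * handle them, ACK them back to the sink, and repeat until no new IRQ is
 * pending or max_process_count iterations have elapsed.
 */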
2567 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2568 {
2569         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2570         uint8_t dret;
2571         bool new_irq_handled = false;
2572         int dpcd_addr;
2573         int dpcd_bytes_to_read;
2574
2575         const int max_process_count = 30;
2576         int process_count = 0;
2577
2578         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2579
2580         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2581                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2582                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2583                 dpcd_addr = DP_SINK_COUNT;
2584         } else {
2585                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2586                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2587                 dpcd_addr = DP_SINK_COUNT_ESI;
2588         }
2589
2590         dret = drm_dp_dpcd_read(
2591                 &aconnector->dm_dp_aux.aux,
2592                 dpcd_addr,
2593                 esi,
2594                 dpcd_bytes_to_read);
2595
2596         while (dret == dpcd_bytes_to_read &&
2597                 process_count < max_process_count) {
2598                 uint8_t retry;
2599                 dret = 0;
2600
2601                 process_count++;
2602
2603                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2604                 /* handle HPD short pulse irq */
2605                 if (aconnector->mst_mgr.mst_state)
2606                         drm_dp_mst_hpd_irq(
2607                                 &aconnector->mst_mgr,
2608                                 esi,
2609                                 &new_irq_handled);
2610
2611                 if (new_irq_handled) {
2612                         /* ACK at DPCD to notify downstream */
2613                         const int ack_dpcd_bytes_to_write =
2614                                 dpcd_bytes_to_read - 1;
2615
2616                         for (retry = 0; retry < 3; retry++) {
2617                                 uint8_t wret;
2618
2619                                 wret = drm_dp_dpcd_write(
2620                                         &aconnector->dm_dp_aux.aux,
2621                                         dpcd_addr + 1,
2622                                         &esi[1],
2623                                         ack_dpcd_bytes_to_write);
2624                                 if (wret == ack_dpcd_bytes_to_write)
2625                                         break;
2626                         }
2627
2628                         /* check if there is new irq to be handled */
2629                         dret = drm_dp_dpcd_read(
2630                                 &aconnector->dm_dp_aux.aux,
2631                                 dpcd_addr,
2632                                 esi,
2633                                 dpcd_bytes_to_read);
2634
2635                         new_irq_handled = false;
2636                 } else {
2637                         break;
2638                 }
2639         }
2640
2641         if (process_count == max_process_count)
2642                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2643 }
2644
2645 static void handle_hpd_rx_irq(void *param)
2646 {
2647         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2648         struct drm_connector *connector = &aconnector->base;
2649         struct drm_device *dev = connector->dev;
2650         struct dc_link *dc_link = aconnector->dc_link;
2651         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2652         bool result = false;
2653         enum dc_connection_type new_connection_type = dc_connection_none;
2654         struct amdgpu_device *adev = drm_to_adev(dev);
2655         union hpd_irq_data hpd_irq_data;
2656
2657         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2658
2659         /*
2660          * TODO: Temporarily take this mutex so the HPD interrupt does not
2661          * race on the GPIO; once an i2c helper is implemented, this mutex
2662          * should be retired.
2663          */
2664         if (dc_link->type != dc_connection_mst_branch)
2665                 mutex_lock(&aconnector->hpd_lock);
2666
2667         read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2668
2669         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2670                 (dc_link->type == dc_connection_mst_branch)) {
2671                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2672                         result = true;
2673                         dm_handle_hpd_rx_irq(aconnector);
2674                         goto out;
2675                 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2676                         result = false;
2677                         dm_handle_hpd_rx_irq(aconnector);
2678                         goto out;
2679                 }
2680         }
2681
2682         mutex_lock(&adev->dm.dc_lock);
2683 #ifdef CONFIG_DRM_AMD_DC_HDCP
2684         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2685 #else
2686         result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2687 #endif
2688         mutex_unlock(&adev->dm.dc_lock);
2689
2690 out:
2691         if (result && !is_mst_root_connector) {
2692                 /* Downstream Port status changed. */
2693                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2694                         DRM_ERROR("KMS: Failed to detect connector\n");
2695
2696                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2697                         emulated_link_detect(dc_link);
2698
2699                         if (aconnector->fake_enable)
2700                                 aconnector->fake_enable = false;
2701
2702                         amdgpu_dm_update_connector_after_detect(aconnector);
2703
2704
2705                         drm_modeset_lock_all(dev);
2706                         dm_restore_drm_connector_state(dev, connector);
2707                         drm_modeset_unlock_all(dev);
2708
2709                         drm_kms_helper_hotplug_event(dev);
2710                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2711
2712                         if (aconnector->fake_enable)
2713                                 aconnector->fake_enable = false;
2714
2715                         amdgpu_dm_update_connector_after_detect(aconnector);
2716
2717
2718                         drm_modeset_lock_all(dev);
2719                         dm_restore_drm_connector_state(dev, connector);
2720                         drm_modeset_unlock_all(dev);
2721
2722                         drm_kms_helper_hotplug_event(dev);
2723                 }
2724         }
2725 #ifdef CONFIG_DRM_AMD_DC_HDCP
2726         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2727                 if (adev->dm.hdcp_workqueue)
2728                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2729         }
2730 #endif
2731
2732         if (dc_link->type != dc_connection_mst_branch) {
2733                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2734                 mutex_unlock(&aconnector->hpd_lock);
2735         }
2736 }
2737
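/* Walk the connector list and register HPD and HPD RX (DP short pulse)
 * handlers for every link that exposes valid interrupt sources.
 */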
2738 static void register_hpd_handlers(struct amdgpu_device *adev)
2739 {
2740         struct drm_device *dev = adev_to_drm(adev);
2741         struct drm_connector *connector;
2742         struct amdgpu_dm_connector *aconnector;
2743         const struct dc_link *dc_link;
2744         struct dc_interrupt_params int_params = {0};
2745
2746         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2747         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2748
2749         list_for_each_entry(connector,
2750                         &dev->mode_config.connector_list, head) {
2751
2752                 aconnector = to_amdgpu_dm_connector(connector);
2753                 dc_link = aconnector->dc_link;
2754
2755                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2756                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2757                         int_params.irq_source = dc_link->irq_source_hpd;
2758
2759                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2760                                         handle_hpd_irq,
2761                                         (void *) aconnector);
2762                 }
2763
2764                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2765
2766                         /* Also register for DP short pulse (hpd_rx). */
2767                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2768                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2769
2770                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2771                                         handle_hpd_rx_irq,
2772                                         (void *) aconnector);
2773                 }
2774         }
2775 }
2776
2777 #if defined(CONFIG_DRM_AMD_DC_SI)
2778 /* Register IRQ sources and initialize IRQ callbacks */
2779 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2780 {
2781         struct dc *dc = adev->dm.dc;
2782         struct common_irq_params *c_irq_params;
2783         struct dc_interrupt_params int_params = {0};
2784         int r;
2785         int i;
2786         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2787
2788         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2789         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2790
2791         /*
2792          * Actions of amdgpu_irq_add_id():
2793          * 1. Register a set() function with base driver.
2794          *    Base driver will call set() function to enable/disable an
2795          *    interrupt in DC hardware.
2796          * 2. Register amdgpu_dm_irq_handler().
2797          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2798          *    coming from DC hardware.
2799          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2800          *    for acknowledging and handling. */
2801
2802         /* Use VBLANK interrupt */
2803         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2804                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2805                 if (r) {
2806                         DRM_ERROR("Failed to add crtc irq id!\n");
2807                         return r;
2808                 }
2809
2810                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2811                 int_params.irq_source =
2812                         dc_interrupt_to_irq_source(dc, i + 1, 0);
2813
2814                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2815
2816                 c_irq_params->adev = adev;
2817                 c_irq_params->irq_src = int_params.irq_source;
2818
2819                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2820                                 dm_crtc_high_irq, c_irq_params);
2821         }
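
        /*
         * Worked example of the table indexing above, assuming DCE6
         * CRTC 0: srcid i + 1 == 1 translates to DC_IRQ_SOURCE_VBLANK1,
         * so vblank_params[DC_IRQ_SOURCE_VBLANK1 - DC_IRQ_SOURCE_VBLANK1],
         * i.e. vblank_params[0], holds the context later handed to
         * dm_crtc_high_irq() for that CRTC.
         */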
2822
2823         /* Use GRPH_PFLIP interrupt */
2824         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2825                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2826                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2827                 if (r) {
2828                         DRM_ERROR("Failed to add page flip irq id!\n");
2829                         return r;
2830                 }
2831
2832                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2833                 int_params.irq_source =
2834                         dc_interrupt_to_irq_source(dc, i, 0);
2835
2836                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2837
2838                 c_irq_params->adev = adev;
2839                 c_irq_params->irq_src = int_params.irq_source;
2840
2841                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2842                                 dm_pflip_high_irq, c_irq_params);
2843
2844         }
2845
2846         /* HPD */
2847         r = amdgpu_irq_add_id(adev, client_id,
2848                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2849         if (r) {
2850                 DRM_ERROR("Failed to add hpd irq id!\n");
2851                 return r;
2852         }
2853
2854         register_hpd_handlers(adev);
2855
2856         return 0;
2857 }
2858 #endif
2859
2860 /* Register IRQ sources and initialize IRQ callbacks */
2861 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2862 {
2863         struct dc *dc = adev->dm.dc;
2864         struct common_irq_params *c_irq_params;
2865         struct dc_interrupt_params int_params = {0};
2866         int r;
2867         int i;
2868         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2869
2870         if (adev->asic_type >= CHIP_VEGA10)
2871                 client_id = SOC15_IH_CLIENTID_DCE;
2872
2873         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2874         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2875
2876         /*
2877          * Actions of amdgpu_irq_add_id():
2878          * 1. Register a set() function with base driver.
2879          *    Base driver will call set() function to enable/disable an
2880          *    interrupt in DC hardware.
2881          * 2. Register amdgpu_dm_irq_handler().
2882          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2883          *    coming from DC hardware.
2884          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2885          *    for acknowledging and handling. */
2886
2887         /* Use VBLANK interrupt */
2888         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2889                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2890                 if (r) {
2891                         DRM_ERROR("Failed to add crtc irq id!\n");
2892                         return r;
2893                 }
2894
2895                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2896                 int_params.irq_source =
2897                         dc_interrupt_to_irq_source(dc, i, 0);
2898
2899                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2900
2901                 c_irq_params->adev = adev;
2902                 c_irq_params->irq_src = int_params.irq_source;
2903
2904                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2905                                 dm_crtc_high_irq, c_irq_params);
2906         }
2907
2908         /* Use VUPDATE interrupt */
2909         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2910                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2911                 if (r) {
2912                         DRM_ERROR("Failed to add vupdate irq id!\n");
2913                         return r;
2914                 }
2915
2916                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2917                 int_params.irq_source =
2918                         dc_interrupt_to_irq_source(dc, i, 0);
2919
2920                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2921
2922                 c_irq_params->adev = adev;
2923                 c_irq_params->irq_src = int_params.irq_source;
2924
2925                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2926                                 dm_vupdate_high_irq, c_irq_params);
2927         }
2928
2929         /* Use GRPH_PFLIP interrupt */
2930         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2931                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2932                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2933                 if (r) {
2934                         DRM_ERROR("Failed to add page flip irq id!\n");
2935                         return r;
2936                 }
2937
2938                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2939                 int_params.irq_source =
2940                         dc_interrupt_to_irq_source(dc, i, 0);
2941
2942                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2943
2944                 c_irq_params->adev = adev;
2945                 c_irq_params->irq_src = int_params.irq_source;
2946
2947                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2948                                 dm_pflip_high_irq, c_irq_params);
2949
2950         }
2951
2952         /* HPD */
2953         r = amdgpu_irq_add_id(adev, client_id,
2954                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2955         if (r) {
2956                 DRM_ERROR("Failed to add hpd irq id!\n");
2957                 return r;
2958         }
2959
2960         register_hpd_handlers(adev);
2961
2962         return 0;
2963 }
2964
2965 #if defined(CONFIG_DRM_AMD_DC_DCN)
2966 /* Register IRQ sources and initialize IRQ callbacks */
2967 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2968 {
2969         struct dc *dc = adev->dm.dc;
2970         struct common_irq_params *c_irq_params;
2971         struct dc_interrupt_params int_params = {0};
2972         int r;
2973         int i;
2974 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
2975         static const unsigned int vrtl_int_srcid[] = {
2976                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
2977                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
2978                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
2979                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
2980                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
2981                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
2982         };
2983 #endif
2984
2985         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2986         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2987
2988         /*
2989          * Actions of amdgpu_irq_add_id():
2990          * 1. Register a set() function with base driver.
2991          *    Base driver will call set() function to enable/disable an
2992          *    interrupt in DC hardware.
2993          * 2. Register amdgpu_dm_irq_handler().
2994          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2995          *    coming from DC hardware.
2996          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2997          *    for acknowledging and handling.
2998          */
2999
3000         /* Use VSTARTUP interrupt */
3001         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3002                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3003                         i++) {
3004                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3005
3006                 if (r) {
3007                         DRM_ERROR("Failed to add crtc irq id!\n");
3008                         return r;
3009                 }
3010
3011                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3012                 int_params.irq_source =
3013                         dc_interrupt_to_irq_source(dc, i, 0);
3014
3015                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3016
3017                 c_irq_params->adev = adev;
3018                 c_irq_params->irq_src = int_params.irq_source;
3019
3020                 amdgpu_dm_irq_register_interrupt(
3021                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3022         }
3023
3024         /* Use OTG vertical line interrupt */
3025 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3026         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3027                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3028                                 vrtl_int_srcid[i], &adev->vline0_irq);
3029
3030                 if (r) {
3031                         DRM_ERROR("Failed to add vline0 irq id!\n");
3032                         return r;
3033                 }
3034
3035                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3036                 int_params.irq_source =
3037                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3038
3039                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3040                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3041                         break;
3042                 }
3043
3044                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3045                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3046
3047                 c_irq_params->adev = adev;
3048                 c_irq_params->irq_src = int_params.irq_source;
3049
3050                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3051                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3052         }
3053 #endif
3054
3055         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3056          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3057          * to trigger at end of each vblank, regardless of state of the lock,
3058          * matching DCE behaviour.
3059          */
3060         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3061              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3062              i++) {
3063                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3064
3065                 if (r) {
3066                         DRM_ERROR("Failed to add vupdate irq id!\n");
3067                         return r;
3068                 }
3069
3070                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3071                 int_params.irq_source =
3072                         dc_interrupt_to_irq_source(dc, i, 0);
3073
3074                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3075
3076                 c_irq_params->adev = adev;
3077                 c_irq_params->irq_src = int_params.irq_source;
3078
3079                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3080                                 dm_vupdate_high_irq, c_irq_params);
3081         }
3082
3083         /* Use GRPH_PFLIP interrupt */
3084         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3085                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3086                         i++) {
3087                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3088                 if (r) {
3089                         DRM_ERROR("Failed to add page flip irq id!\n");
3090                         return r;
3091                 }
3092
3093                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3094                 int_params.irq_source =
3095                         dc_interrupt_to_irq_source(dc, i, 0);
3096
3097                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3098
3099                 c_irq_params->adev = adev;
3100                 c_irq_params->irq_src = int_params.irq_source;
3101
3102                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3103                                 dm_pflip_high_irq, c_irq_params);
3104
3105         }
3106
3107         /* HPD */
3108         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3109                         &adev->hpd_irq);
3110         if (r) {
3111                 DRM_ERROR("Failed to add hpd irq id!\n");
3112                 return r;
3113         }
3114
3115         register_hpd_handlers(adev);
3116
3117         return 0;
3118 }
3119 #endif
3120
3121 /*
3122  * Acquires the lock for the atomic state object and returns
3123  * the new atomic state.
3124  *
3125  * This should only be called during atomic check.
3126  */
3127 static int dm_atomic_get_state(struct drm_atomic_state *state,
3128                                struct dm_atomic_state **dm_state)
3129 {
3130         struct drm_device *dev = state->dev;
3131         struct amdgpu_device *adev = drm_to_adev(dev);
3132         struct amdgpu_display_manager *dm = &adev->dm;
3133         struct drm_private_state *priv_state;
3134
3135         if (*dm_state)
3136                 return 0;
3137
3138         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3139         if (IS_ERR(priv_state))
3140                 return PTR_ERR(priv_state);
3141
3142         *dm_state = to_dm_atomic_state(priv_state);
3143
3144         return 0;
3145 }
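
/*
 * Minimal usage sketch (hypothetical caller, not taken from this file):
 * an atomic-check helper lazily acquires the DC context once per
 * drm_atomic_state and reuses it on later calls:
 *
 *   struct dm_atomic_state *dm_state = NULL;
 *   int ret = dm_atomic_get_state(state, &dm_state);
 *
 *   if (ret)
 *           return ret;
 *   // dm_state->context is now held via the private object lock taken
 *   // by drm_atomic_get_private_obj_state(); subsequent calls with the
 *   // same dm_state pointer return immediately.
 */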
3146
3147 static struct dm_atomic_state *
3148 dm_atomic_get_new_state(struct drm_atomic_state *state)
3149 {
3150         struct drm_device *dev = state->dev;
3151         struct amdgpu_device *adev = drm_to_adev(dev);
3152         struct amdgpu_display_manager *dm = &adev->dm;
3153         struct drm_private_obj *obj;
3154         struct drm_private_state *new_obj_state;
3155         int i;
3156
3157         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3158                 if (obj->funcs == dm->atomic_obj.funcs)
3159                         return to_dm_atomic_state(new_obj_state);
3160         }
3161
3162         return NULL;
3163 }
3164
3165 static struct drm_private_state *
3166 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3167 {
3168         struct dm_atomic_state *old_state, *new_state;
3169
3170         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3171         if (!new_state)
3172                 return NULL;
3173
3174         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3175
3176         old_state = to_dm_atomic_state(obj->state);
3177
3178         if (old_state && old_state->context)
3179                 new_state->context = dc_copy_state(old_state->context);
3180
3181         if (!new_state->context) {
3182                 kfree(new_state);
3183                 return NULL;
3184         }
3185
3186         return &new_state->base;
3187 }
3188
3189 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3190                                     struct drm_private_state *state)
3191 {
3192         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3193
3194         if (dm_state && dm_state->context)
3195                 dc_release_state(dm_state->context);
3196
3197         kfree(dm_state);
3198 }
3199
3200 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3201         .atomic_duplicate_state = dm_atomic_duplicate_state,
3202         .atomic_destroy_state = dm_atomic_destroy_state,
3203 };
3204
3205 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3206 {
3207         struct dm_atomic_state *state;
3208         int r;
3209
3210         adev->mode_info.mode_config_initialized = true;
3211
3212         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3213         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3214
3215         adev_to_drm(adev)->mode_config.max_width = 16384;
3216         adev_to_drm(adev)->mode_config.max_height = 16384;
3217
3218         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3219         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3220         /* indicates support for immediate flip */
3221         adev_to_drm(adev)->mode_config.async_page_flip = true;
3222
3223         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3224
3225         state = kzalloc(sizeof(*state), GFP_KERNEL);
3226         if (!state)
3227                 return -ENOMEM;
3228
3229         state->context = dc_create_state(adev->dm.dc);
3230         if (!state->context) {
3231                 kfree(state);
3232                 return -ENOMEM;
3233         }
3234
3235         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3236
3237         drm_atomic_private_obj_init(adev_to_drm(adev),
3238                                     &adev->dm.atomic_obj,
3239                                     &state->base,
3240                                     &dm_atomic_state_funcs);
3241
3242         r = amdgpu_display_modeset_create_props(adev);
3243         if (r) {
3244                 dc_release_state(state->context);
3245                 kfree(state);
3246                 return r;
3247         }
3248
3249         r = amdgpu_dm_audio_init(adev);
3250         if (r) {
3251                 dc_release_state(state->context);
3252                 kfree(state);
3253                 return r;
3254         }
3255
3256         return 0;
3257 }
3258
3259 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3260 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3261 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3262
3263 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3264         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3265
3266 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3267 {
3268 #if defined(CONFIG_ACPI)
3269         struct amdgpu_dm_backlight_caps caps;
3270
3271         memset(&caps, 0, sizeof(caps));
3272
3273         if (dm->backlight_caps.caps_valid)
3274                 return;
3275
3276         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3277         if (caps.caps_valid) {
3278                 dm->backlight_caps.caps_valid = true;
3279                 if (caps.aux_support)
3280                         return;
3281                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3282                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3283         } else {
3284                 dm->backlight_caps.min_input_signal =
3285                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3286                 dm->backlight_caps.max_input_signal =
3287                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3288         }
3289 #else
3290         if (dm->backlight_caps.aux_support)
3291                 return;
3292
3293         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3294         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3295 #endif
3296 }
3297
3298 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3299                                 unsigned int *min, unsigned int *max)
3300 {
3301         if (!caps)
3302                 return 0;
3303
3304         if (caps->aux_support) {
3305                 // Firmware limits are in nits, DC API wants millinits.
3306                 *max = 1000 * caps->aux_max_input_signal;
3307                 *min = 1000 * caps->aux_min_input_signal;
3308         } else {
3309                 // Firmware limits are 8-bit, PWM control is 16-bit.
3310                 *max = 0x101 * caps->max_input_signal;
3311                 *min = 0x101 * caps->min_input_signal;
3312         }
3313         return 1;
3314 }
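
/*
 * Worked example of the non-AUX scaling above: the default firmware
 * limits of 12 and 255 become 0x101 * 12 = 0x0C0C and
 * 0x101 * 255 = 0xFFFF, i.e. the 8-bit limit replicated into both
 * bytes of the 16-bit PWM range.
 */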
3315
3316 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3317                                         uint32_t brightness)
3318 {
3319         unsigned int min, max;
3320
3321         if (!get_brightness_range(caps, &min, &max))
3322                 return brightness;
3323
3324         // Rescale 0..255 to min..max
3325         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3326                                        AMDGPU_MAX_BL_LEVEL);
3327 }
3328
3329 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3330                                       uint32_t brightness)
3331 {
3332         unsigned int min, max;
3333
3334         if (!get_brightness_range(caps, &min, &max))
3335                 return brightness;
3336
3337         if (brightness < min)
3338                 return 0;
3339         // Rescale min..max to 0..255
3340         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3341                                  max - min);
3342 }
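
/*
 * Round-trip example, assuming the default PWM range above
 * (min = 0x0C0C, max = 0xFFFF): convert_brightness_from_user() maps a
 * user level of 128 to 0x0C0C + (0xFFFF - 0x0C0C) * 128 / 255, about
 * 0x8681, and convert_brightness_to_user() maps that value back to
 * 128. Anything below min converts back to 0.
 */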
3343
3344 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3345 {
3346         struct amdgpu_display_manager *dm = bl_get_data(bd);
3347         struct amdgpu_dm_backlight_caps caps;
3348         struct dc_link *link = NULL;
3349         u32 brightness;
3350         bool rc;
3351
3352         amdgpu_dm_update_backlight_caps(dm);
3353         caps = dm->backlight_caps;
3354
3355         link = (struct dc_link *)dm->backlight_link;
3356
3357         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3358         // Change brightness based on AUX property
3359         if (caps.aux_support)
3360                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3361                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3362         else
3363                 rc = dc_link_set_backlight_level(link, brightness, 0);
3364
3365         return rc ? 0 : 1;
3366 }
3367
3368 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3369 {
3370         struct amdgpu_display_manager *dm = bl_get_data(bd);
3371         struct amdgpu_dm_backlight_caps caps;
3372
3373         amdgpu_dm_update_backlight_caps(dm);
3374         caps = dm->backlight_caps;
3375
3376         if (caps.aux_support) {
3377                 struct dc_link *link = (struct dc_link *)dm->backlight_link;
3378                 u32 avg, peak;
3379                 bool rc;
3380
3381                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3382                 if (!rc)
3383                         return bd->props.brightness;
3384                 return convert_brightness_to_user(&caps, avg);
3385         } else {
3386                 int ret = dc_link_get_backlight_level(dm->backlight_link);
3387
3388                 if (ret == DC_ERROR_UNEXPECTED)
3389                         return bd->props.brightness;
3390                 return convert_brightness_to_user(&caps, ret);
3391         }
3392 }
3393
3394 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3395         .options = BL_CORE_SUSPENDRESUME,
3396         .get_brightness = amdgpu_dm_backlight_get_brightness,
3397         .update_status  = amdgpu_dm_backlight_update_status,
3398 };
3399
3400 static void
3401 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3402 {
3403         char bl_name[16];
3404         struct backlight_properties props = { 0 };
3405
3406         amdgpu_dm_update_backlight_caps(dm);
3407
3408         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3409         props.brightness = AMDGPU_MAX_BL_LEVEL;
3410         props.type = BACKLIGHT_RAW;
3411
3412         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3413                  adev_to_drm(dm->adev)->primary->index);
3414
3415         dm->backlight_dev = backlight_device_register(bl_name,
3416                                                       adev_to_drm(dm->adev)->dev,
3417                                                       dm,
3418                                                       &amdgpu_dm_backlight_ops,
3419                                                       &props);
3420
3421         if (IS_ERR(dm->backlight_dev))
3422                 DRM_ERROR("DM: Backlight registration failed!\n");
3423         else
3424                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3425 }
3426
3427 #endif
3428
3429 static int initialize_plane(struct amdgpu_display_manager *dm,
3430                             struct amdgpu_mode_info *mode_info, int plane_id,
3431                             enum drm_plane_type plane_type,
3432                             const struct dc_plane_cap *plane_cap)
3433 {
3434         struct drm_plane *plane;
3435         unsigned long possible_crtcs;
3436         int ret = 0;
3437
3438         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3439         if (!plane) {
3440                 DRM_ERROR("KMS: Failed to allocate plane\n");
3441                 return -ENOMEM;
3442         }
3443         plane->type = plane_type;
3444
3445         /*
3446          * HACK: IGT tests expect that the primary plane for a CRTC
3447          * can only have one possible CRTC. Only expose support for
3448          * all CRTCs on planes that will not be used as a primary plane
3449          * for a CRTC - i.e. overlay or underlay planes.
3450          */
3451         possible_crtcs = 1 << plane_id;
3452         if (plane_id >= dm->dc->caps.max_streams)
3453                 possible_crtcs = 0xff;
3454
3455         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3456
3457         if (ret) {
3458                 DRM_ERROR("KMS: Failed to initialize plane\n");
3459                 kfree(plane);
3460                 return ret;
3461         }
3462
3463         if (mode_info)
3464                 mode_info->planes[plane_id] = plane;
3465
3466         return ret;
3467 }
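
/*
 * Example of the possible_crtcs choice above, assuming an ASIC with
 * max_streams == 4: primary plane_id 2 gets possible_crtcs =
 * 1 << 2 = 0x4 and is tied to CRTC 2 only, while an overlay created
 * with plane_id 4 gets 0xff and may be placed on any CRTC.
 */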
3468
3469
3470 static void register_backlight_device(struct amdgpu_display_manager *dm,
3471                                       struct dc_link *link)
3472 {
3473 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3474         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3475
3476         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3477             link->type != dc_connection_none) {
3478                 /*
3479                  * Even if registration fails, we should continue with
3480                  * DM initialization, because not having backlight control
3481                  * is better than a black screen.
3482                  */
3483                 amdgpu_dm_register_backlight_device(dm);
3484
3485                 if (dm->backlight_dev)
3486                         dm->backlight_link = link;
3487         }
3488 #endif
3489 }
3490
3491
3492 /*
3493  * In this architecture, the association
3494  * connector -> encoder -> crtc
3495  * is not really required. The crtc and connector will hold the
3496  * display_index as an abstraction to use with the DAL component.
3497  *
3498  * Returns 0 on success
3499  */
3500 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3501 {
3502         struct amdgpu_display_manager *dm = &adev->dm;
3503         int32_t i;
3504         struct amdgpu_dm_connector *aconnector = NULL;
3505         struct amdgpu_encoder *aencoder = NULL;
3506         struct amdgpu_mode_info *mode_info = &adev->mode_info;
3507         uint32_t link_cnt;
3508         int32_t primary_planes;
3509         enum dc_connection_type new_connection_type = dc_connection_none;
3510         const struct dc_plane_cap *plane;
3511
3512         dm->display_indexes_num = dm->dc->caps.max_streams;
3513         /* Update the actual number of CRTCs used */
3514         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3515
3516         link_cnt = dm->dc->caps.max_links;
3517         if (amdgpu_dm_mode_config_init(dm->adev)) {
3518                 DRM_ERROR("DM: Failed to initialize mode config\n");
3519                 return -EINVAL;
3520         }
3521
3522         /* There is one primary plane per CRTC */
3523         primary_planes = dm->dc->caps.max_streams;
3524         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3525
3526         /*
3527          * Initialize primary planes, implicit planes for legacy IOCTLS.
3528          * Order is reversed to match iteration order in atomic check.
3529          */
3530         for (i = (primary_planes - 1); i >= 0; i--) {
3531                 plane = &dm->dc->caps.planes[i];
3532
3533                 if (initialize_plane(dm, mode_info, i,
3534                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
3535                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
3536                         goto fail;
3537                 }
3538         }
3539
3540         /*
3541          * Initialize overlay planes, index starting after primary planes.
3542          * These planes have a higher DRM index than the primary planes since
3543          * they should be considered as having a higher z-order.
3544          * Order is reversed to match iteration order in atomic check.
3545          *
3546          * Only support DCN for now, and only expose one so we don't encourage
3547          * userspace to use up all the pipes.
3548          */
3549         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3550                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3551
3552                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3553                         continue;
3554
3555                 if (!plane->blends_with_above || !plane->blends_with_below)
3556                         continue;
3557
3558                 if (!plane->pixel_format_support.argb8888)
3559                         continue;
3560
3561                 if (initialize_plane(dm, NULL, primary_planes + i,
3562                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
3563                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3564                         goto fail;
3565                 }
3566
3567                 /* Only create one overlay plane. */
3568                 break;
3569         }
3570
3571         for (i = 0; i < dm->dc->caps.max_streams; i++)
3572                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3573                         DRM_ERROR("KMS: Failed to initialize crtc\n");
3574                         goto fail;
3575                 }
3576
3577         /* Loop over all connectors on the board */
3578         for (i = 0; i < link_cnt; i++) {
3579                 struct dc_link *link = NULL;
3580
3581                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3582                         DRM_ERROR(
3583                                 "KMS: Cannot support more than %d display indexes\n",
3584                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
3585                         continue;
3586                 }
3587
3588                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3589                 if (!aconnector)
3590                         goto fail;
3591
3592                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3593                 if (!aencoder)
3594                         goto fail;
3595
3596                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3597                         DRM_ERROR("KMS: Failed to initialize encoder\n");
3598                         goto fail;
3599                 }
3600
3601                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3602                         DRM_ERROR("KMS: Failed to initialize connector\n");
3603                         goto fail;
3604                 }
3605
3606                 link = dc_get_link_at_index(dm->dc, i);
3607
3608                 if (!dc_link_detect_sink(link, &new_connection_type))
3609                         DRM_ERROR("KMS: Failed to detect connector\n");
3610
3611                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3612                         emulated_link_detect(link);
3613                         amdgpu_dm_update_connector_after_detect(aconnector);
3614
3615                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3616                         amdgpu_dm_update_connector_after_detect(aconnector);
3617                         register_backlight_device(dm, link);
3618                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3619                                 amdgpu_dm_set_psr_caps(link);
3620                 }
3621
3622
3623         }
3624
3625         /* Software is initialized. Now we can register interrupt handlers. */
3626         switch (adev->asic_type) {
3627 #if defined(CONFIG_DRM_AMD_DC_SI)
3628         case CHIP_TAHITI:
3629         case CHIP_PITCAIRN:
3630         case CHIP_VERDE:
3631         case CHIP_OLAND:
3632                 if (dce60_register_irq_handlers(dm->adev)) {
3633                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3634                         goto fail;
3635                 }
3636                 break;
3637 #endif
3638         case CHIP_BONAIRE:
3639         case CHIP_HAWAII:
3640         case CHIP_KAVERI:
3641         case CHIP_KABINI:
3642         case CHIP_MULLINS:
3643         case CHIP_TONGA:
3644         case CHIP_FIJI:
3645         case CHIP_CARRIZO:
3646         case CHIP_STONEY:
3647         case CHIP_POLARIS11:
3648         case CHIP_POLARIS10:
3649         case CHIP_POLARIS12:
3650         case CHIP_VEGAM:
3651         case CHIP_VEGA10:
3652         case CHIP_VEGA12:
3653         case CHIP_VEGA20:
3654                 if (dce110_register_irq_handlers(dm->adev)) {
3655                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3656                         goto fail;
3657                 }
3658                 break;
3659 #if defined(CONFIG_DRM_AMD_DC_DCN)
3660         case CHIP_RAVEN:
3661         case CHIP_NAVI12:
3662         case CHIP_NAVI10:
3663         case CHIP_NAVI14:
3664         case CHIP_RENOIR:
3665         case CHIP_SIENNA_CICHLID:
3666         case CHIP_NAVY_FLOUNDER:
3667         case CHIP_DIMGREY_CAVEFISH:
3668         case CHIP_VANGOGH:
3669                 if (dcn10_register_irq_handlers(dm->adev)) {
3670                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3671                         goto fail;
3672                 }
3673                 break;
3674 #endif
3675         default:
3676                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3677                 goto fail;
3678         }
3679
3680         return 0;
3681 fail:
3682         kfree(aencoder);
3683         kfree(aconnector);
3684
3685         return -EINVAL;
3686 }
3687
3688 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3689 {
3690         drm_mode_config_cleanup(dm->ddev);
3691         drm_atomic_private_obj_fini(&dm->atomic_obj);
3693 }
3694
3695 /******************************************************************************
3696  * amdgpu_display_funcs functions
3697  *****************************************************************************/
3698
3699 /*
3700  * dm_bandwidth_update - program display watermarks
3701  *
3702  * @adev: amdgpu_device pointer
3703  *
3704  * Calculate and program the display watermarks and line buffer allocation.
3705  */
3706 static void dm_bandwidth_update(struct amdgpu_device *adev)
3707 {
3708         /* TODO: implement later */
3709 }
3710
3711 static const struct amdgpu_display_funcs dm_display_funcs = {
3712         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3713         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3714         .backlight_set_level = NULL, /* never called for DC */
3715         .backlight_get_level = NULL, /* never called for DC */
3716         .hpd_sense = NULL,/* called unconditionally */
3717         .hpd_set_polarity = NULL, /* called unconditionally */
3718         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3719         .page_flip_get_scanoutpos =
3720                 dm_crtc_get_scanoutpos,/* called unconditionally */
3721         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3722         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3723 };
3724
3725 #if defined(CONFIG_DEBUG_KERNEL_DC)
3726
3727 static ssize_t s3_debug_store(struct device *device,
3728                               struct device_attribute *attr,
3729                               const char *buf,
3730                               size_t count)
3731 {
3732         int ret;
3733         int s3_state;
3734         struct drm_device *drm_dev = dev_get_drvdata(device);
3735         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3736
3737         ret = kstrtoint(buf, 0, &s3_state);
3738
3739         if (ret == 0) {
3740                 if (s3_state) {
3741                         dm_resume(adev);
3742                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3743                 } else
3744                         dm_suspend(adev);
3745         }
3746
3747         return ret == 0 ? count : 0;
3748 }
3749
3750 DEVICE_ATTR_WO(s3_debug);
3751
3752 #endif
3753
3754 static int dm_early_init(void *handle)
3755 {
3756         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3757
3758         switch (adev->asic_type) {
3759 #if defined(CONFIG_DRM_AMD_DC_SI)
3760         case CHIP_TAHITI:
3761         case CHIP_PITCAIRN:
3762         case CHIP_VERDE:
3763                 adev->mode_info.num_crtc = 6;
3764                 adev->mode_info.num_hpd = 6;
3765                 adev->mode_info.num_dig = 6;
3766                 break;
3767         case CHIP_OLAND:
3768                 adev->mode_info.num_crtc = 2;
3769                 adev->mode_info.num_hpd = 2;
3770                 adev->mode_info.num_dig = 2;
3771                 break;
3772 #endif
3773         case CHIP_BONAIRE:
3774         case CHIP_HAWAII:
3775                 adev->mode_info.num_crtc = 6;
3776                 adev->mode_info.num_hpd = 6;
3777                 adev->mode_info.num_dig = 6;
3778                 break;
3779         case CHIP_KAVERI:
3780                 adev->mode_info.num_crtc = 4;
3781                 adev->mode_info.num_hpd = 6;
3782                 adev->mode_info.num_dig = 7;
3783                 break;
3784         case CHIP_KABINI:
3785         case CHIP_MULLINS:
3786                 adev->mode_info.num_crtc = 2;
3787                 adev->mode_info.num_hpd = 6;
3788                 adev->mode_info.num_dig = 6;
3789                 break;
3790         case CHIP_FIJI:
3791         case CHIP_TONGA:
3792                 adev->mode_info.num_crtc = 6;
3793                 adev->mode_info.num_hpd = 6;
3794                 adev->mode_info.num_dig = 7;
3795                 break;
3796         case CHIP_CARRIZO:
3797                 adev->mode_info.num_crtc = 3;
3798                 adev->mode_info.num_hpd = 6;
3799                 adev->mode_info.num_dig = 9;
3800                 break;
3801         case CHIP_STONEY:
3802                 adev->mode_info.num_crtc = 2;
3803                 adev->mode_info.num_hpd = 6;
3804                 adev->mode_info.num_dig = 9;
3805                 break;
3806         case CHIP_POLARIS11:
3807         case CHIP_POLARIS12:
3808                 adev->mode_info.num_crtc = 5;
3809                 adev->mode_info.num_hpd = 5;
3810                 adev->mode_info.num_dig = 5;
3811                 break;
3812         case CHIP_POLARIS10:
3813         case CHIP_VEGAM:
3814                 adev->mode_info.num_crtc = 6;
3815                 adev->mode_info.num_hpd = 6;
3816                 adev->mode_info.num_dig = 6;
3817                 break;
3818         case CHIP_VEGA10:
3819         case CHIP_VEGA12:
3820         case CHIP_VEGA20:
3821                 adev->mode_info.num_crtc = 6;
3822                 adev->mode_info.num_hpd = 6;
3823                 adev->mode_info.num_dig = 6;
3824                 break;
3825 #if defined(CONFIG_DRM_AMD_DC_DCN)
3826         case CHIP_RAVEN:
3827         case CHIP_RENOIR:
3828         case CHIP_VANGOGH:
3829                 adev->mode_info.num_crtc = 4;
3830                 adev->mode_info.num_hpd = 4;
3831                 adev->mode_info.num_dig = 4;
3832                 break;
3833         case CHIP_NAVI10:
3834         case CHIP_NAVI12:
3835         case CHIP_SIENNA_CICHLID:
3836         case CHIP_NAVY_FLOUNDER:
3837                 adev->mode_info.num_crtc = 6;
3838                 adev->mode_info.num_hpd = 6;
3839                 adev->mode_info.num_dig = 6;
3840                 break;
3841         case CHIP_NAVI14:
3842         case CHIP_DIMGREY_CAVEFISH:
3843                 adev->mode_info.num_crtc = 5;
3844                 adev->mode_info.num_hpd = 5;
3845                 adev->mode_info.num_dig = 5;
3846                 break;
3847 #endif
3848         default:
3849                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3850                 return -EINVAL;
3851         }
3852
3853         amdgpu_dm_set_irq_funcs(adev);
3854
3855         if (adev->mode_info.funcs == NULL)
3856                 adev->mode_info.funcs = &dm_display_funcs;
3857
3858         /*
3859          * Note: Do NOT change adev->audio_endpt_rreg and
3860          * adev->audio_endpt_wreg because they are initialised in
3861          * amdgpu_device_init()
3862          */
3863 #if defined(CONFIG_DEBUG_KERNEL_DC)
3864         device_create_file(
3865                 adev_to_drm(adev)->dev,
3866                 &dev_attr_s3_debug);
3867 #endif
3868
3869         return 0;
3870 }
3871
3872 static bool modeset_required(struct drm_crtc_state *crtc_state,
3873                              struct dc_stream_state *new_stream,
3874                              struct dc_stream_state *old_stream)
3875 {
3876         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3877 }
3878
3879 static bool modereset_required(struct drm_crtc_state *crtc_state)
3880 {
3881         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3882 }
3883
3884 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3885 {
3886         drm_encoder_cleanup(encoder);
3887         kfree(encoder);
3888 }
3889
3890 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3891         .destroy = amdgpu_dm_encoder_destroy,
3892 };
3893
3894
3895 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3896                                          struct drm_framebuffer *fb,
3897                                          int *min_downscale, int *max_upscale)
3898 {
3899         struct amdgpu_device *adev = drm_to_adev(dev);
3900         struct dc *dc = adev->dm.dc;
3901         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3902         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3903
3904         switch (fb->format->format) {
3905         case DRM_FORMAT_P010:
3906         case DRM_FORMAT_NV12:
3907         case DRM_FORMAT_NV21:
3908                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3909                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3910                 break;
3911
3912         case DRM_FORMAT_XRGB16161616F:
3913         case DRM_FORMAT_ARGB16161616F:
3914         case DRM_FORMAT_XBGR16161616F:
3915         case DRM_FORMAT_ABGR16161616F:
3916                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3917                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3918                 break;
3919
3920         default:
3921                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3922                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3923                 break;
3924         }
3925
3926         /*
3927          * A factor of 1 in the plane_cap means scaling is not allowed, i.e.
3928          * use a scaling factor of 1.0 == 1000 units.
3929          */
3930         if (*max_upscale == 1)
3931                 *max_upscale = 1000;
3932
3933         if (*min_downscale == 1)
3934                 *min_downscale = 1000;
3935 }
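
/*
 * The factors above are in thousandths of the scaling ratio: e.g. a
 * max_upscale of 16000 permits up to 16.0x upscaling and a
 * min_downscale of 250 permits shrinking to 0.25x of the source size
 * (these are also the fallback values fill_dc_scaling_info() assumes
 * below when no plane caps are available).
 */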
3936
3937
3938 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3939                                 struct dc_scaling_info *scaling_info)
3940 {
3941         int scale_w, scale_h, min_downscale, max_upscale;
3942
3943         memset(scaling_info, 0, sizeof(*scaling_info));
3944
3945         /* Source is fixed 16.16 but we ignore mantissa for now... */
3946         scaling_info->src_rect.x = state->src_x >> 16;
3947         scaling_info->src_rect.y = state->src_y >> 16;
3948
3949         scaling_info->src_rect.width = state->src_w >> 16;
3950         if (scaling_info->src_rect.width == 0)
3951                 return -EINVAL;
3952
3953         scaling_info->src_rect.height = state->src_h >> 16;
3954         if (scaling_info->src_rect.height == 0)
3955                 return -EINVAL;
3956
3957         scaling_info->dst_rect.x = state->crtc_x;
3958         scaling_info->dst_rect.y = state->crtc_y;
3959
3960         if (state->crtc_w == 0)
3961                 return -EINVAL;
3962
3963         scaling_info->dst_rect.width = state->crtc_w;
3964
3965         if (state->crtc_h == 0)
3966                 return -EINVAL;
3967
3968         scaling_info->dst_rect.height = state->crtc_h;
3969
3970         /* DRM doesn't specify clipping on destination output. */
3971         scaling_info->clip_rect = scaling_info->dst_rect;
3972
3973         /* Validate scaling per-format with DC plane caps */
3974         if (state->plane && state->plane->dev && state->fb) {
3975                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3976                                              &min_downscale, &max_upscale);
3977         } else {
3978                 min_downscale = 250;
3979                 max_upscale = 16000;
3980         }
3981
3982         scale_w = scaling_info->dst_rect.width * 1000 /
3983                   scaling_info->src_rect.width;
3984
3985         if (scale_w < min_downscale || scale_w > max_upscale)
3986                 return -EINVAL;
3987
3988         scale_h = scaling_info->dst_rect.height * 1000 /
3989                   scaling_info->src_rect.height;
3990
3991         if (scale_h < min_downscale || scale_h > max_upscale)
3992                 return -EINVAL;
3993
3994         /*
3995          * The "scaling_quality" can be ignored for now; with quality = 0,
3996          * DC assumes reasonable defaults based on the format.
3997          */
3998
3999         return 0;
4000 }
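
/*
 * A quick 16.16 fixed-point example for the src rect handling above:
 * a DRM src_w of 1920.5 pixels arrives as 1920 * 65536 + 32768 =
 * 0x07808000, and the >> 16 keeps the 1920 integer part while the
 * half-pixel fraction is dropped, as noted in fill_dc_scaling_info().
 */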
4001
4002 static void
4003 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4004                                  uint64_t tiling_flags)
4005 {
4006         /* Fill GFX8 params */
4007         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4008                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4009
4010                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4011                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4012                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4013                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4014                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4015
4016                 /* XXX fix me for VI */
4017                 tiling_info->gfx8.num_banks = num_banks;
4018                 tiling_info->gfx8.array_mode =
4019                                 DC_ARRAY_2D_TILED_THIN1;
4020                 tiling_info->gfx8.tile_split = tile_split;
4021                 tiling_info->gfx8.bank_width = bankw;
4022                 tiling_info->gfx8.bank_height = bankh;
4023                 tiling_info->gfx8.tile_aspect = mtaspect;
4024                 tiling_info->gfx8.tile_mode =
4025                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4026         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4027                         == DC_ARRAY_1D_TILED_THIN1) {
4028                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4029         }
4030
4031         tiling_info->gfx8.pipe_config =
4032                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4033 }
4034
4035 static void
4036 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4037                                   union dc_tiling_info *tiling_info)
4038 {
4039         tiling_info->gfx9.num_pipes =
4040                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4041         tiling_info->gfx9.num_banks =
4042                 adev->gfx.config.gb_addr_config_fields.num_banks;
4043         tiling_info->gfx9.pipe_interleave =
4044                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4045         tiling_info->gfx9.num_shader_engines =
4046                 adev->gfx.config.gb_addr_config_fields.num_se;
4047         tiling_info->gfx9.max_compressed_frags =
4048                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4049         tiling_info->gfx9.num_rb_per_se =
4050                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4051         tiling_info->gfx9.shaderEnable = 1;
4052         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4053             adev->asic_type == CHIP_NAVY_FLOUNDER ||
4054             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4055             adev->asic_type == CHIP_VANGOGH)
4056                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4057 }
4058
4059 static int
4060 validate_dcc(struct amdgpu_device *adev,
4061              const enum surface_pixel_format format,
4062              const enum dc_rotation_angle rotation,
4063              const union dc_tiling_info *tiling_info,
4064              const struct dc_plane_dcc_param *dcc,
4065              const struct dc_plane_address *address,
4066              const struct plane_size *plane_size)
4067 {
4068         struct dc *dc = adev->dm.dc;
4069         struct dc_dcc_surface_param input;
4070         struct dc_surface_dcc_cap output;
4071
4072         memset(&input, 0, sizeof(input));
4073         memset(&output, 0, sizeof(output));
4074
4075         if (!dcc->enable)
4076                 return 0;
4077
4078         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4079             !dc->cap_funcs.get_dcc_compression_cap)
4080                 return -EINVAL;
4081
4082         input.format = format;
4083         input.surface_size.width = plane_size->surface_size.width;
4084         input.surface_size.height = plane_size->surface_size.height;
4085         input.swizzle_mode = tiling_info->gfx9.swizzle;
4086
4087         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4088                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4089         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4090                 input.scan = SCAN_DIRECTION_VERTICAL;
4091
4092         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4093                 return -EINVAL;
4094
4095         if (!output.capable)
4096                 return -EINVAL;
4097
4098         if (dcc->independent_64b_blks == 0 &&
4099             output.grph.rgb.independent_64b_blks != 0)
4100                 return -EINVAL;
4101
4102         return 0;
4103 }
4104
4105 static bool
4106 modifier_has_dcc(uint64_t modifier)
4107 {
4108         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4109 }
4110
4111 static unsigned int
4112 modifier_gfx9_swizzle_mode(uint64_t modifier)
4113 {
4114         if (modifier == DRM_FORMAT_MOD_LINEAR)
4115                 return 0;
4116
4117         return AMD_FMT_MOD_GET(TILE, modifier);
4118 }
4119
4120 static const struct drm_format_info *
4121 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4122 {
4123         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4124 }
4125
4126 static void
4127 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4128                                     union dc_tiling_info *tiling_info,
4129                                     uint64_t modifier)
4130 {
4131         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4132         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4133         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4134         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4135
4136         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4137
4138         if (!IS_AMD_FMT_MOD(modifier))
4139                 return;
4140
4141         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4142         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4143
4144         if (adev->family >= AMDGPU_FAMILY_NV) {
4145                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4146         } else {
4147                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4148
4149                 /* For DCC we know it isn't RB-aligned, so rb_per_se doesn't matter. */
4150         }
4151 }
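
/*
 * Worked example of the XOR-bit split above, assuming a modifier with
 * PIPE_XOR_BITS == 6: pipes_log2 = min(4, 6) = 4, giving
 * num_pipes = 1 << 4 = 16 and num_shader_engines = 1 << (6 - 4) = 4.
 */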
4152
4153 enum dm_micro_swizzle {
4154         MICRO_SWIZZLE_Z = 0,
4155         MICRO_SWIZZLE_S = 1,
4156         MICRO_SWIZZLE_D = 2,
4157         MICRO_SWIZZLE_R = 3
4158 };
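
/*
 * These values correspond to the low two bits of the GFX9+ swizzle
 * mode, which is why dm_plane_format_mod_supported() below masks
 * modifier_gfx9_swizzle_mode(modifier) with 3; e.g. the 64KB _D_X
 * tile modes carry MICRO_SWIZZLE_D (2) in those bits.
 */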
4159
4160 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4161                                           uint32_t format,
4162                                           uint64_t modifier)
4163 {
4164         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4165         const struct drm_format_info *info = drm_format_info(format);
4166
4167         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4168
4169         if (!info)
4170                 return false;
4171
4172         /*
4173          * We always have to allow this modifier, because core DRM still
4174          * checks LINEAR support if userspace does not provide modifiers.
4175          */
4176         if (modifier == DRM_FORMAT_MOD_LINEAR)
4177                 return true;
4178
4179         /*
4180          * The arbitrary tiling support for multiplane formats has not been hooked
4181          * up.
4182          */
4183         if (info->num_planes > 1)
4184                 return false;
4185
4186         /*
4187          * For D swizzle the canonical modifier depends on the bpp, so check
4188          * it here.
4189          */
4190         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4191             adev->family >= AMDGPU_FAMILY_NV) {
4192                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4193                         return false;
4194         }
4195
4196         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4197             info->cpp[0] < 8)
4198                 return false;
4199
4200         if (modifier_has_dcc(modifier)) {
4201                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4202                 if (info->cpp[0] != 4)
4203                         return false;
4204         }
4205
4206         return true;
4207 }
4208
4209 static void
4210 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4211 {
4212         if (!*mods)
4213                 return;
4214
4215         if (*cap - *size < 1) {
4216                 uint64_t new_cap = *cap * 2;
4217                 uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
4218
4219                 if (!new_mods) {
4220                         kfree(*mods);
4221                         *mods = NULL;
4222                         return;
4223                 }
4224
4225                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4226                 kfree(*mods);
4227                 *mods = new_mods;
4228                 *cap = new_cap;
4229         }
4230
4231         (*mods)[*size] = mod;
4232         *size += 1;
4233 }
4234
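/*
 * Advertise the GFX9 (Vega/Raven) modifiers. DCC variants are only
 * added on Raven-family parts, with constant-encode variants limited to
 * Raven2 and later; the plain 64K D/S tilings come last as fallbacks.
 */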
4235 static void
4236 add_gfx9_modifiers(const struct amdgpu_device *adev,
4237                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4238 {
4239         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4240         int pipe_xor_bits = min(8, pipes +
4241                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4242         int bank_xor_bits = min(8 - pipe_xor_bits,
4243                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4244         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4245                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4246
4248         if (adev->family == AMDGPU_FAMILY_RV) {
4249                 /* Raven2 and later */
4250                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4251
4252                 /*
4253                  * No _D DCC swizzles yet because we only allow 32bpp, which
4254                  * doesn't support _D on DCN
4255                  */
4256
4257                 if (has_constant_encode) {
4258                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4259                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4260                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4261                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4262                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4263                                     AMD_FMT_MOD_SET(DCC, 1) |
4264                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4265                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4266                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4267                 }
4268
4269                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4270                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4271                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4272                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4273                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4274                             AMD_FMT_MOD_SET(DCC, 1) |
4275                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4276                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4277                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4278
4279                 if (has_constant_encode) {
4280                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4281                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4282                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4283                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4284                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4285                                     AMD_FMT_MOD_SET(DCC, 1) |
4286                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4287                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4288                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4290                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4291                                     AMD_FMT_MOD_SET(RB, rb) |
4292                                     AMD_FMT_MOD_SET(PIPE, pipes));
4293                 }
4294
4295                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4296                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4297                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4298                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4299                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4300                             AMD_FMT_MOD_SET(DCC, 1) |
4301                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4302                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4303                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4304                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4305                             AMD_FMT_MOD_SET(RB, rb) |
4306                             AMD_FMT_MOD_SET(PIPE, pipes));
4307         }
4308
4309         /*
4310          * Only supported for 64bpp on Raven, will be filtered on format in
4311          * dm_plane_format_mod_supported.
4312          */
4313         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4314                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4315                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4316                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4317                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4318
4319         if (adev->family == AMDGPU_FAMILY_RV) {
4320                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4321                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4322                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4323                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4324                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4325         }
4326
4327         /*
4328          * Only supported for 64bpp on Raven, will be filtered on format in
4329          * dm_plane_format_mod_supported.
4330          */
4331         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4332                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4333                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4334
4335         if (adev->family == AMDGPU_FAMILY_RV) {
4336                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4337                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4338                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4339         }
4340 }
4341
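/*
 * Advertise the GFX10.1 (Navi1x) modifiers: R-swizzled 64K tilings with
 * and without DCC, then the GFX9 64K D/S tilings as fallbacks.
 */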
4342 static void
4343 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4344                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4345 {
4346         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4347
4348         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4349                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4350                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4351                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4352                     AMD_FMT_MOD_SET(DCC, 1) |
4353                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4354                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4355                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4356
4357         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4358                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4359                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4360                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4361                     AMD_FMT_MOD_SET(DCC, 1) |
4362                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4363                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4364                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4365                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4366
4367         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4368                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4369                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4370                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4371
4372         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4373                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4374                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4375                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4376
4378         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4379         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4380                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4381                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4382
4383         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4384                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4385                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4386 }
4387
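/*
 * Advertise the GFX10.3 (Sienna Cichlid and other RB+ parts) modifiers,
 * which additionally encode the packer count and use independent
 * 64B/128B DCC blocks.
 */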
4388 static void
4389 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4390                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
4391 {
4392         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4393         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4394
4395         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4396                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4397                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4398                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4399                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4400                     AMD_FMT_MOD_SET(DCC, 1) |
4401                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4402                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4403                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4404                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4405
4406         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4407                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4408                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4409                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4410                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
4411                     AMD_FMT_MOD_SET(DCC, 1) |
4412                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4413                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4414                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4415                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4416                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4417
4418         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4419                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4420                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4421                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4422                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4423
4424         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4425                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4426                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4427                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4428                     AMD_FMT_MOD_SET(PACKERS, pkrs));
4429
4430         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4431         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4432                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4433                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4434
4435         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4436                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4437                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4438 }
4439
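/*
 * Build the modifier list advertised for a plane, most capable first.
 * Cursor planes only get LINEAR; every list is terminated by
 * DRM_FORMAT_MOD_INVALID.
 */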
4440 static int
4441 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4442 {
4443         uint64_t size = 0, capacity = 128;
4444         *mods = NULL;
4445
4446         /* We have not hooked up any pre-GFX9 modifiers. */
4447         if (adev->family < AMDGPU_FAMILY_AI)
4448                 return 0;
4449
4450         *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
4451
4452         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4453                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4454                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4455                 return *mods ? 0 : -ENOMEM;
4456         }
4457
4458         switch (adev->family) {
4459         case AMDGPU_FAMILY_AI:
4460         case AMDGPU_FAMILY_RV:
4461                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4462                 break;
4463         case AMDGPU_FAMILY_NV:
4464         case AMDGPU_FAMILY_VGH:
4465                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4466                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4467                 else
4468                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4469                 break;
4470         }
4471
4472         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4473
4474         /* INVALID marks the end of the list. */
4475         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4476
4477         if (!*mods)
4478                 return -ENOMEM;
4479
4480         return 0;
4481 }
4482
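/*
 * Fill tiling, DCC and DCC metadata address information for a GFX9+
 * surface from its format modifier, then validate the result with DC.
 */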
4483 static int
4484 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4485                                           const struct amdgpu_framebuffer *afb,
4486                                           const enum surface_pixel_format format,
4487                                           const enum dc_rotation_angle rotation,
4488                                           const struct plane_size *plane_size,
4489                                           union dc_tiling_info *tiling_info,
4490                                           struct dc_plane_dcc_param *dcc,
4491                                           struct dc_plane_address *address,
4492                                           const bool force_disable_dcc)
4493 {
4494         const uint64_t modifier = afb->base.modifier;
4496
4497         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4498         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4499
4500         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4501                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4502
4503                 dcc->enable = 1;
4504                 dcc->meta_pitch = afb->base.pitches[1];
4505                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4506
4507                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4508                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4509         }
4510
4511         return validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4516 }
4517
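/*
 * Translate a DRM framebuffer into DC buffer descriptions: plane sizes,
 * pitches, scanout addresses, tiling and DCC. Video (YUV) formats get
 * separate luma and chroma addresses and sizes.
 */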
4518 static int
4519 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4520                              const struct amdgpu_framebuffer *afb,
4521                              const enum surface_pixel_format format,
4522                              const enum dc_rotation_angle rotation,
4523                              const uint64_t tiling_flags,
4524                              union dc_tiling_info *tiling_info,
4525                              struct plane_size *plane_size,
4526                              struct dc_plane_dcc_param *dcc,
4527                              struct dc_plane_address *address,
4528                              bool tmz_surface,
4529                              bool force_disable_dcc)
4530 {
4531         const struct drm_framebuffer *fb = &afb->base;
4532         int ret;
4533
4534         memset(tiling_info, 0, sizeof(*tiling_info));
4535         memset(plane_size, 0, sizeof(*plane_size));
4536         memset(dcc, 0, sizeof(*dcc));
4537         memset(address, 0, sizeof(*address));
4538
4539         address->tmz_surface = tmz_surface;
4540
4541         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4542                 uint64_t addr = afb->address + fb->offsets[0];
4543
4544                 plane_size->surface_size.x = 0;
4545                 plane_size->surface_size.y = 0;
4546                 plane_size->surface_size.width = fb->width;
4547                 plane_size->surface_size.height = fb->height;
4548                 plane_size->surface_pitch =
4549                         fb->pitches[0] / fb->format->cpp[0];
4550
4551                 address->type = PLN_ADDR_TYPE_GRAPHICS;
4552                 address->grph.addr.low_part = lower_32_bits(addr);
4553                 address->grph.addr.high_part = upper_32_bits(addr);
4554         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4555                 uint64_t luma_addr = afb->address + fb->offsets[0];
4556                 uint64_t chroma_addr = afb->address + fb->offsets[1];
4557
4558                 plane_size->surface_size.x = 0;
4559                 plane_size->surface_size.y = 0;
4560                 plane_size->surface_size.width = fb->width;
4561                 plane_size->surface_size.height = fb->height;
4562                 plane_size->surface_pitch =
4563                         fb->pitches[0] / fb->format->cpp[0];
4564
4565                 plane_size->chroma_size.x = 0;
4566                 plane_size->chroma_size.y = 0;
4567                 /* TODO: set these based on surface format */
4568                 plane_size->chroma_size.width = fb->width / 2;
4569                 plane_size->chroma_size.height = fb->height / 2;
4570
4571                 plane_size->chroma_pitch =
4572                         fb->pitches[1] / fb->format->cpp[1];
4573
4574                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4575                 address->video_progressive.luma_addr.low_part =
4576                         lower_32_bits(luma_addr);
4577                 address->video_progressive.luma_addr.high_part =
4578                         upper_32_bits(luma_addr);
4579                 address->video_progressive.chroma_addr.low_part =
4580                         lower_32_bits(chroma_addr);
4581                 address->video_progressive.chroma_addr.high_part =
4582                         upper_32_bits(chroma_addr);
4583         }
4584
4585         if (adev->family >= AMDGPU_FAMILY_AI) {
4586                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4587                                                                 rotation, plane_size,
4588                                                                 tiling_info, dcc,
4589                                                                 address,
4590                                                                 force_disable_dcc);
4591                 if (ret)
4592                         return ret;
4593         } else {
4594                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4595         }
4596
4597         return 0;
4598 }
4599
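/*
 * Derive the blending configuration from DRM plane state. Only overlay
 * planes blend; per-pixel alpha requires the premultiplied blend mode
 * and an alpha-capable format, while a plane alpha below 0xffff enables
 * global alpha.
 */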
4600 static void
4601 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4602                                bool *per_pixel_alpha, bool *global_alpha,
4603                                int *global_alpha_value)
4604 {
4605         *per_pixel_alpha = false;
4606         *global_alpha = false;
4607         *global_alpha_value = 0xff;
4608
4609         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4610                 return;
4611
4612         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4613                 static const uint32_t alpha_formats[] = {
4614                         DRM_FORMAT_ARGB8888,
4615                         DRM_FORMAT_RGBA8888,
4616                         DRM_FORMAT_ABGR8888,
4617                 };
4618                 uint32_t format = plane_state->fb->format->format;
4619                 unsigned int i;
4620
4621                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4622                         if (format == alpha_formats[i]) {
4623                                 *per_pixel_alpha = true;
4624                                 break;
4625                         }
4626                 }
4627         }
4628
4629         if (plane_state->alpha < 0xffff) {
4630                 *global_alpha = true;
4631                 *global_alpha_value = plane_state->alpha >> 8;
4632         }
4633 }
4634
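/*
 * Map the DRM color encoding and range properties of a YUV surface to a
 * DC color space. RGB surfaces always use sRGB.
 */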
4635 static int
4636 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4637                             const enum surface_pixel_format format,
4638                             enum dc_color_space *color_space)
4639 {
4640         bool full_range;
4641
4642         *color_space = COLOR_SPACE_SRGB;
4643
4644         /* DRM color properties only affect non-RGB formats. */
4645         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4646                 return 0;
4647
4648         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4649
4650         switch (plane_state->color_encoding) {
4651         case DRM_COLOR_YCBCR_BT601:
4652                 if (full_range)
4653                         *color_space = COLOR_SPACE_YCBCR601;
4654                 else
4655                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4656                 break;
4657
4658         case DRM_COLOR_YCBCR_BT709:
4659                 if (full_range)
4660                         *color_space = COLOR_SPACE_YCBCR709;
4661                 else
4662                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4663                 break;
4664
4665         case DRM_COLOR_YCBCR_BT2020:
4666                 if (full_range)
4667                         *color_space = COLOR_SPACE_2020_YCBCR;
4668                 else
4669                         return -EINVAL;
4670                 break;
4671
4672         default:
4673                 return -EINVAL;
4674         }
4675
4676         return 0;
4677 }
4678
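/*
 * Convert DRM plane state into a DC plane_info/address pair: pixel
 * format, rotation, buffer attributes and blending. Returns -EINVAL for
 * formats DC cannot scan out.
 */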
4679 static int
4680 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4681                             const struct drm_plane_state *plane_state,
4682                             const uint64_t tiling_flags,
4683                             struct dc_plane_info *plane_info,
4684                             struct dc_plane_address *address,
4685                             bool tmz_surface,
4686                             bool force_disable_dcc)
4687 {
4688         const struct drm_framebuffer *fb = plane_state->fb;
4689         const struct amdgpu_framebuffer *afb =
4690                 to_amdgpu_framebuffer(plane_state->fb);
4691         int ret;
4692
4693         memset(plane_info, 0, sizeof(*plane_info));
4694
4695         switch (fb->format->format) {
4696         case DRM_FORMAT_C8:
4697                 plane_info->format =
4698                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4699                 break;
4700         case DRM_FORMAT_RGB565:
4701                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4702                 break;
4703         case DRM_FORMAT_XRGB8888:
4704         case DRM_FORMAT_ARGB8888:
4705                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4706                 break;
4707         case DRM_FORMAT_XRGB2101010:
4708         case DRM_FORMAT_ARGB2101010:
4709                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4710                 break;
4711         case DRM_FORMAT_XBGR2101010:
4712         case DRM_FORMAT_ABGR2101010:
4713                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4714                 break;
4715         case DRM_FORMAT_XBGR8888:
4716         case DRM_FORMAT_ABGR8888:
4717                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4718                 break;
4719         case DRM_FORMAT_NV21:
4720                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4721                 break;
4722         case DRM_FORMAT_NV12:
4723                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4724                 break;
4725         case DRM_FORMAT_P010:
4726                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4727                 break;
4728         case DRM_FORMAT_XRGB16161616F:
4729         case DRM_FORMAT_ARGB16161616F:
4730                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4731                 break;
4732         case DRM_FORMAT_XBGR16161616F:
4733         case DRM_FORMAT_ABGR16161616F:
4734                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4735                 break;
4736         default:
4737                 DRM_ERROR(
4738                         "Unsupported screen format %p4cc\n",
4739                         &fb->format->format);
4740                 return -EINVAL;
4741         }
4742
4743         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4744         case DRM_MODE_ROTATE_0:
4745                 plane_info->rotation = ROTATION_ANGLE_0;
4746                 break;
4747         case DRM_MODE_ROTATE_90:
4748                 plane_info->rotation = ROTATION_ANGLE_90;
4749                 break;
4750         case DRM_MODE_ROTATE_180:
4751                 plane_info->rotation = ROTATION_ANGLE_180;
4752                 break;
4753         case DRM_MODE_ROTATE_270:
4754                 plane_info->rotation = ROTATION_ANGLE_270;
4755                 break;
4756         default:
4757                 plane_info->rotation = ROTATION_ANGLE_0;
4758                 break;
4759         }
4760
4761         plane_info->visible = true;
4762         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4763
4764         plane_info->layer_index = 0;
4765
4766         ret = fill_plane_color_attributes(plane_state, plane_info->format,
4767                                           &plane_info->color_space);
4768         if (ret)
4769                 return ret;
4770
4771         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4772                                            plane_info->rotation, tiling_flags,
4773                                            &plane_info->tiling_info,
4774                                            &plane_info->plane_size,
4775                                            &plane_info->dcc, address, tmz_surface,
4776                                            force_disable_dcc);
4777         if (ret)
4778                 return ret;
4779
4780         fill_blending_from_plane_state(
4781                 plane_state, &plane_info->per_pixel_alpha,
4782                 &plane_info->global_alpha, &plane_info->global_alpha_value);
4783
4784         return 0;
4785 }
4786
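/*
 * Populate a dc_plane_state from DRM plane and CRTC state: scaling
 * rectangles, buffer attributes, blending and input color management.
 * DCC is force-disabled on Raven while suspending.
 */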
4787 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4788                                     struct dc_plane_state *dc_plane_state,
4789                                     struct drm_plane_state *plane_state,
4790                                     struct drm_crtc_state *crtc_state)
4791 {
4792         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4793         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4794         struct dc_scaling_info scaling_info;
4795         struct dc_plane_info plane_info;
4796         int ret;
4797         bool force_disable_dcc = false;
4798
4799         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4800         if (ret)
4801                 return ret;
4802
4803         dc_plane_state->src_rect = scaling_info.src_rect;
4804         dc_plane_state->dst_rect = scaling_info.dst_rect;
4805         dc_plane_state->clip_rect = scaling_info.clip_rect;
4806         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4807
4808         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4809         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4810                                           afb->tiling_flags,
4811                                           &plane_info,
4812                                           &dc_plane_state->address,
4813                                           afb->tmz_surface,
4814                                           force_disable_dcc);
4815         if (ret)
4816                 return ret;
4817
4818         dc_plane_state->format = plane_info.format;
4819         dc_plane_state->color_space = plane_info.color_space;
4821         dc_plane_state->plane_size = plane_info.plane_size;
4822         dc_plane_state->rotation = plane_info.rotation;
4823         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4824         dc_plane_state->stereo_format = plane_info.stereo_format;
4825         dc_plane_state->tiling_info = plane_info.tiling_info;
4826         dc_plane_state->visible = plane_info.visible;
4827         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4828         dc_plane_state->global_alpha = plane_info.global_alpha;
4829         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4830         dc_plane_state->dcc = plane_info.dcc;
4831         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
4832         dc_plane_state->flip_int_enabled = true;
4833
4834         /*
4835          * Always set input transfer function, since plane state is refreshed
4836          * every time.
4837          */
4838         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4839         if (ret)
4840                 return ret;
4841
4842         return 0;
4843 }
4844
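/*
 * Compute the stream src (viewport in composition space) and dst
 * (addressable area) rectangles from the connector's scaling mode and
 * underscan settings.
 */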
4845 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4846                                            const struct dm_connector_state *dm_state,
4847                                            struct dc_stream_state *stream)
4848 {
4849         enum amdgpu_rmx_type rmx_type;
4850
4851         struct rect src = { 0 }; /* viewport in composition space */
4852         struct rect dst = { 0 }; /* stream addressable area */
4853
4854         /* no mode, nothing to be done */
4855         if (!mode)
4856                 return;
4857
4858         /* Full screen scaling by default */
4859         src.width = mode->hdisplay;
4860         src.height = mode->vdisplay;
4861         dst.width = stream->timing.h_addressable;
4862         dst.height = stream->timing.v_addressable;
4863
4864         if (dm_state) {
4865                 rmx_type = dm_state->scaling;
4866                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4867                         if (src.width * dst.height <
4868                                         src.height * dst.width) {
4869                                 /* height needs less upscaling/more downscaling */
4870                                 dst.width = src.width *
4871                                                 dst.height / src.height;
4872                         } else {
4873                                 /* width needs less upscaling/more downscaling */
4874                                 dst.height = src.height *
4875                                                 dst.width / src.width;
4876                         }
4877                 } else if (rmx_type == RMX_CENTER) {
4878                         dst = src;
4879                 }
4880
4881                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4882                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4883
4884                 if (dm_state->underscan_enable) {
4885                         dst.x += dm_state->underscan_hborder / 2;
4886                         dst.y += dm_state->underscan_vborder / 2;
4887                         dst.width -= dm_state->underscan_hborder;
4888                         dst.height -= dm_state->underscan_vborder;
4889                 }
4890         }
4891
4892         stream->src = src;
4893         stream->dst = dst;
4894
4895         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
4896                         dst.x, dst.y, dst.width, dst.height);
4898 }
4899
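/*
 * Pick a DC color depth from the connector's display info, capping it
 * by the HDMI deep-color caps for YCbCr 4:2:0 outputs and by any bpc
 * limit requested by userspace (rounded down to an even value).
 */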
4900 static enum dc_color_depth
4901 convert_color_depth_from_display_info(const struct drm_connector *connector,
4902                                       bool is_y420, int requested_bpc)
4903 {
4904         uint8_t bpc;
4905
4906         if (is_y420) {
4907                 bpc = 8;
4908
4909                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4910                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4911                         bpc = 16;
4912                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4913                         bpc = 12;
4914                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4915                         bpc = 10;
4916         } else {
4917                 bpc = (uint8_t)connector->display_info.bpc;
4918                 /* Assume 8 bpc by default if no bpc is specified. */
4919                 bpc = bpc ? bpc : 8;
4920         }
4921
4922         if (requested_bpc > 0) {
4923                 /*
4924                  * Cap display bpc based on the user requested value.
4925                  *
4926                  * The value for state->max_bpc may not correctly updated
4927                  * The value for state->max_bpc may not be correctly updated
4928                  * or if this was called outside of atomic check, so it
4929                  * can't be used directly.
4930                  */
4931                 bpc = min_t(u8, bpc, requested_bpc);
4932
4933                 /* Round down to the nearest even number. */
4934                 bpc = bpc - (bpc & 1);
4935         }
4936
4937         switch (bpc) {
4938         case 0:
4939                 /*
4940                  * Temporary workaround: DRM doesn't parse color depth for
4941                  * EDID revisions before 1.4.
4942                  * TODO: Fix EDID parsing
4943                  */
4944                 return COLOR_DEPTH_888;
4945         case 6:
4946                 return COLOR_DEPTH_666;
4947         case 8:
4948                 return COLOR_DEPTH_888;
4949         case 10:
4950                 return COLOR_DEPTH_101010;
4951         case 12:
4952                 return COLOR_DEPTH_121212;
4953         case 14:
4954                 return COLOR_DEPTH_141414;
4955         case 16:
4956                 return COLOR_DEPTH_161616;
4957         default:
4958                 return COLOR_DEPTH_UNDEFINED;
4959         }
4960 }
4961
4962 static enum dc_aspect_ratio
4963 get_aspect_ratio(const struct drm_display_mode *mode_in)
4964 {
4965         /* 1-1 mapping, since both enums follow the HDMI spec. */
4966         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4967 }
4968
4969 static enum dc_color_space
4970 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4971 {
4972         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4973
4974         switch (dc_crtc_timing->pixel_encoding) {
4975         case PIXEL_ENCODING_YCBCR422:
4976         case PIXEL_ENCODING_YCBCR444:
4977         case PIXEL_ENCODING_YCBCR420:
4978         {
4979                 /*
4980                  * According to the HDMI spec, 27.03 MHz is the separation
4981                  * point between HDTV and SDTV; use YCbCr709 above it and
4982                  * YCbCr601 below it.
4983                  */
4984                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4985                         if (dc_crtc_timing->flags.Y_ONLY)
4986                                 color_space =
4987                                         COLOR_SPACE_YCBCR709_LIMITED;
4988                         else
4989                                 color_space = COLOR_SPACE_YCBCR709;
4990                 } else {
4991                         if (dc_crtc_timing->flags.Y_ONLY)
4992                                 color_space =
4993                                         COLOR_SPACE_YCBCR601_LIMITED;
4994                         else
4995                                 color_space = COLOR_SPACE_YCBCR601;
4996                 }
4998         }
4999         break;
5000         case PIXEL_ENCODING_RGB:
5001                 color_space = COLOR_SPACE_SRGB;
5002                 break;
5003
5004         default:
5005                 WARN_ON(1);
5006                 break;
5007         }
5008
5009         return color_space;
5010 }
5011
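/*
 * Reduce the colour depth step by step until the resulting TMDS clock
 * fits the sink's max_tmds_clock. Returns false when even 8 bpc does
 * not fit.
 */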
5012 static bool adjust_colour_depth_from_display_info(
5013         struct dc_crtc_timing *timing_out,
5014         const struct drm_display_info *info)
5015 {
5016         enum dc_color_depth depth = timing_out->display_color_depth;
5017         int normalized_clk;
5018         do {
5019                 normalized_clk = timing_out->pix_clk_100hz / 10;
5020                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5021                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5022                         normalized_clk /= 2;
5023                 /* Adjust the pixel clock per the HDMI spec for the colour depth */
5024                 switch (depth) {
5025                 case COLOR_DEPTH_888:
5026                         break;
5027                 case COLOR_DEPTH_101010:
5028                         normalized_clk = (normalized_clk * 30) / 24;
5029                         break;
5030                 case COLOR_DEPTH_121212:
5031                         normalized_clk = (normalized_clk * 36) / 24;
5032                         break;
5033                 case COLOR_DEPTH_161616:
5034                         normalized_clk = (normalized_clk * 48) / 24;
5035                         break;
5036                 default:
5037                         /* The above depths are the only ones valid for HDMI. */
5038                         return false;
5039                 }
5040                 if (normalized_clk <= info->max_tmds_clock) {
5041                         timing_out->display_color_depth = depth;
5042                         return true;
5043                 }
5044         } while (--depth > COLOR_DEPTH_666);
5045         return false;
5046 }
5047
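/*
 * Fill the DC stream timing from a DRM display mode: pixel encoding,
 * color depth, VIC/HDMI-VIC and sync polarities. When old_stream is
 * given, its VIC and polarities are reused so the timing stays stable
 * (HDMI recomputes the VIC from the AVI infoframe).
 */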
5048 static void fill_stream_properties_from_drm_display_mode(
5049         struct dc_stream_state *stream,
5050         const struct drm_display_mode *mode_in,
5051         const struct drm_connector *connector,
5052         const struct drm_connector_state *connector_state,
5053         const struct dc_stream_state *old_stream,
5054         int requested_bpc)
5055 {
5056         struct dc_crtc_timing *timing_out = &stream->timing;
5057         const struct drm_display_info *info = &connector->display_info;
5058         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5059         struct hdmi_vendor_infoframe hv_frame;
5060         struct hdmi_avi_infoframe avi_frame;
5061
5062         memset(&hv_frame, 0, sizeof(hv_frame));
5063         memset(&avi_frame, 0, sizeof(avi_frame));
5064
5065         timing_out->h_border_left = 0;
5066         timing_out->h_border_right = 0;
5067         timing_out->v_border_top = 0;
5068         timing_out->v_border_bottom = 0;
5069         /* TODO: un-hardcode */
5070         if (drm_mode_is_420_only(info, mode_in)
5071                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5072                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5073         else if (drm_mode_is_420_also(info, mode_in)
5074                         && aconnector->force_yuv420_output)
5075                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5076         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5077                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5078                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5079         else
5080                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5081
5082         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5083         timing_out->display_color_depth = convert_color_depth_from_display_info(
5084                 connector,
5085                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5086                 requested_bpc);
5087         timing_out->scan_type = SCANNING_TYPE_NODATA;
5088         timing_out->hdmi_vic = 0;
5089
5090         if (old_stream) {
5091                 timing_out->vic = old_stream->timing.vic;
5092                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5093                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5094         } else {
5095                 timing_out->vic = drm_match_cea_mode(mode_in);
5096                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5097                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5098                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5099                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5100         }
5101
5102         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5103                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5104                 timing_out->vic = avi_frame.video_code;
5105                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5106                 timing_out->hdmi_vic = hv_frame.vic;
5107         }
5108
5109         timing_out->h_addressable = mode_in->hdisplay;
5110         timing_out->h_total = mode_in->htotal;
5111         timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5112         timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5113         timing_out->v_total = mode_in->vtotal;
5114         timing_out->v_addressable = mode_in->vdisplay;
5115         timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5116         timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5117         timing_out->pix_clk_100hz = mode_in->clock * 10;
5118
5119         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5120
5121         stream->output_color_space = get_output_color_space(timing_out);
5122
5123         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5124         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5125         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5126                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5127                     drm_mode_is_420_also(info, mode_in) &&
5128                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5129                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5130                         adjust_colour_depth_from_display_info(timing_out, info);
5131                 }
5132         }
5133 }
5134
5135 static void fill_audio_info(struct audio_info *audio_info,
5136                             const struct drm_connector *drm_connector,
5137                             const struct dc_sink *dc_sink)
5138 {
5139         int i = 0;
5140         int cea_revision = 0;
5141         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5142
5143         audio_info->manufacture_id = edid_caps->manufacturer_id;
5144         audio_info->product_id = edid_caps->product_id;
5145
5146         cea_revision = drm_connector->display_info.cea_rev;
5147
5148         strscpy(audio_info->display_name,
5149                 edid_caps->display_name,
5150                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5151
5152         if (cea_revision >= 3) {
5153                 audio_info->mode_count = edid_caps->audio_mode_count;
5154
5155                 for (i = 0; i < audio_info->mode_count; ++i) {
5156                         audio_info->modes[i].format_code =
5157                                         (enum audio_format_code)
5158                                         (edid_caps->audio_modes[i].format_code);
5159                         audio_info->modes[i].channel_count =
5160                                         edid_caps->audio_modes[i].channel_count;
5161                         audio_info->modes[i].sample_rates.all =
5162                                         edid_caps->audio_modes[i].sample_rate;
5163                         audio_info->modes[i].sample_size =
5164                                         edid_caps->audio_modes[i].sample_size;
5165                 }
5166         }
5167
5168         audio_info->flags.all = edid_caps->speaker_flags;
5169
5170         /* TODO: We only check progressive mode; check interlaced mode too */
5171         if (drm_connector->latency_present[0]) {
5172                 audio_info->video_latency = drm_connector->video_latency[0];
5173                 audio_info->audio_latency = drm_connector->audio_latency[0];
5174         }
5175
5176         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5178 }
5179
5180 static void
5181 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5182                                       struct drm_display_mode *dst_mode)
5183 {
5184         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5185         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5186         dst_mode->crtc_clock = src_mode->crtc_clock;
5187         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5188         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5189         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5190         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5191         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5192         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5193         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5194         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5195         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5196         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5197         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5198 }
5199
5200 static void
5201 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5202                                         const struct drm_display_mode *native_mode,
5203                                         bool scale_enabled)
5204 {
5205         if (scale_enabled) {
5206                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5207         } else if (native_mode->clock == drm_mode->clock &&
5208                         native_mode->htotal == drm_mode->htotal &&
5209                         native_mode->vtotal == drm_mode->vtotal) {
5210                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5211         } else {
5212                 /* neither scaling nor an amdgpu-inserted mode, no need to patch */
5213         }
5214 }
5215
5216 static struct dc_sink *
5217 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5218 {
5219         struct dc_sink_init_data sink_init_data = { 0 };
5220         struct dc_sink *sink = NULL;
5221         sink_init_data.link = aconnector->dc_link;
5222         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5223
5224         sink = dc_sink_create(&sink_init_data);
5225         if (!sink) {
5226                 DRM_ERROR("Failed to create sink!\n");
5227                 return NULL;
5228         }
5229         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5230
5231         return sink;
5232 }
5233
5234 static void set_multisync_trigger_params(
5235                 struct dc_stream_state *stream)
5236 {
5237         if (stream->triggered_crtc_reset.enabled) {
5238                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5239                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5240         }
5241 }
5242
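/*
 * Elect the synchronized stream with the highest refresh rate as the
 * multisync master and point every stream's CRTC-reset trigger at it.
 */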
5243 static void set_master_stream(struct dc_stream_state *stream_set[],
5244                               int stream_count)
5245 {
5246         int j, highest_rfr = 0, master_stream = 0;
5247
5248         for (j = 0; j < stream_count; j++) {
5249                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5250                         int refresh_rate = 0;
5251
5252                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
5253                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
5254                         if (refresh_rate > highest_rfr) {
5255                                 highest_rfr = refresh_rate;
5256                                 master_stream = j;
5257                         }
5258                 }
5259         }
5260         for (j = 0; j < stream_count; j++) {
5261                 if (stream_set[j])
5262                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5263         }
5264 }
5265
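/*
 * With two or more streams active, configure the vsync-rising reset
 * trigger on each stream that has CRTC reset enabled and elect a master
 * for per-frame synchronization.
 */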
5266 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5267 {
5268         int i = 0;
5269
5270         if (context->stream_count < 2)
5271                 return;
5272         for (i = 0; i < context->stream_count; i++) {
5273                 if (!context->streams[i])
5274                         continue;
5275                 /*
5276                  * TODO: add a function to read AMD VSDB bits and set
5277                  * crtc_sync_master.multi_sync_enabled flag
5278                  * For now it's set to false
5279                  */
5280                 set_multisync_trigger_params(context->streams[i]);
5281         }
5282         set_master_stream(context->streams, context->stream_count);
5283 }
5284
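/*
 * Find the mode with the highest refresh rate at the preferred mode's
 * resolution; this becomes the base timing for freesync video modes.
 * The result is cached in aconnector->freesync_vid_base.
 */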
5285 static struct drm_display_mode *
5286 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5287                           bool use_probed_modes)
5288 {
5289         struct drm_display_mode *m, *m_pref = NULL;
5290         u16 current_refresh, highest_refresh;
5291         struct list_head *list_head = use_probed_modes ?
5292                                                     &aconnector->base.probed_modes :
5293                                                     &aconnector->base.modes;
5294
5295         if (aconnector->freesync_vid_base.clock != 0)
5296                 return &aconnector->freesync_vid_base;
5297
5298         /* Find the preferred mode */
5299         list_for_each_entry(m, list_head, head) {
5300                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5301                         m_pref = m;
5302                         break;
5303                 }
5304         }
5305
5306         if (!m_pref) {
5307                 /* Probably an EDID with no preferred mode: fall back to the first entry */
5308                 m_pref = list_first_entry_or_null(
5309                         &aconnector->base.modes, struct drm_display_mode, head);
5310                 if (!m_pref) {
5311                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5312                         return NULL;
5313                 }
5314         }
5315
5316         highest_refresh = drm_mode_vrefresh(m_pref);
5317
5318         /*
5319          * Find the mode with highest refresh rate with same resolution.
5320          * For some monitors, preferred mode is not the mode with highest
5321          * supported refresh rate.
5322          */
5323         list_for_each_entry(m, list_head, head) {
5324                 current_refresh = drm_mode_vrefresh(m);
5325
5326                 if (m->hdisplay == m_pref->hdisplay &&
5327                     m->vdisplay == m_pref->vdisplay &&
5328                     highest_refresh < current_refresh) {
5329                         highest_refresh = current_refresh;
5330                         m_pref = m;
5331                 }
5332         }
5333
5334         aconnector->freesync_vid_base = *m_pref;
5335         return m_pref;
5336 }
5337
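/*
 * A mode qualifies as a freesync video mode when it matches the
 * highest-refresh base mode in everything except the vertical blanking
 * interval: same clock and horizontal timing, with vsync and vtotal
 * shifted by the same amount.
 */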
5338 static bool is_freesync_video_mode(struct drm_display_mode *mode,
5339                                    struct amdgpu_dm_connector *aconnector)
5340 {
5341         struct drm_display_mode *high_mode;
5342         int timing_diff;
5343
5344         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5345         if (!high_mode || !mode)
5346                 return false;
5347
5348         timing_diff = high_mode->vtotal - mode->vtotal;
5349
5350         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5351             high_mode->hdisplay != mode->hdisplay ||
5352             high_mode->vdisplay != mode->vdisplay ||
5353             high_mode->hsync_start != mode->hsync_start ||
5354             high_mode->hsync_end != mode->hsync_end ||
5355             high_mode->htotal != mode->htotal ||
5356             high_mode->hskew != mode->hskew ||
5357             high_mode->vscan != mode->vscan ||
5358             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5359             high_mode->vsync_end - mode->vsync_end != timing_diff)
5360                 return false;
5361
5362         return true;
5363 }
5364
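/*
 * Create and fill a dc_stream_state for a connector and mode. A fake
 * (virtual) sink is substituted when nothing is attached so headless
 * code paths still get a stream; freesync video modes reuse the cached
 * base timing, and DSC is configured below for capable DP sinks.
 */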
5365 static struct dc_stream_state *
5366 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5367                        const struct drm_display_mode *drm_mode,
5368                        const struct dm_connector_state *dm_state,
5369                        const struct dc_stream_state *old_stream,
5370                        int requested_bpc)
5371 {
5372         struct drm_display_mode *preferred_mode = NULL;
5373         struct drm_connector *drm_connector;
5374         const struct drm_connector_state *con_state =
5375                 dm_state ? &dm_state->base : NULL;
5376         struct dc_stream_state *stream = NULL;
5377         struct drm_display_mode mode = *drm_mode;
5378         struct drm_display_mode saved_mode;
5379         struct drm_display_mode *freesync_mode = NULL;
5380         bool native_mode_found = false;
5381         bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5382         int mode_refresh;
5383         int preferred_refresh = 0;
5384 #if defined(CONFIG_DRM_AMD_DC_DCN)
5385         struct dsc_dec_dpcd_caps dsc_caps;
5386         uint32_t link_bandwidth_kbps;
5387 #endif
5388         struct dc_sink *sink = NULL;
5389
5390         memset(&saved_mode, 0, sizeof(saved_mode));
5391
5392         if (aconnector == NULL) {
5393                 DRM_ERROR("aconnector is NULL!\n");
5394                 return stream;
5395         }
5396
5397         drm_connector = &aconnector->base;
5398
5399         if (!aconnector->dc_sink) {
5400                 sink = create_fake_sink(aconnector);
5401                 if (!sink)
5402                         return stream;
5403         } else {
5404                 sink = aconnector->dc_sink;
5405                 dc_sink_retain(sink);
5406         }
5407
5408         stream = dc_create_stream_for_sink(sink);
5409
5410         if (stream == NULL) {
5411                 DRM_ERROR("Failed to create stream for sink!\n");
5412                 goto finish;
5413         }
5414
5415         stream->dm_stream_context = aconnector;
5416
5417         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5418                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5419
5420         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5421                 /* Search for preferred mode */
5422                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5423                         native_mode_found = true;
5424                         break;
5425                 }
5426         }
5427         if (!native_mode_found)
5428                 preferred_mode = list_first_entry_or_null(
5429                                 &aconnector->base.modes,
5430                                 struct drm_display_mode,
5431                                 head);
5432
5433         mode_refresh = drm_mode_vrefresh(&mode);
5434
5435         if (preferred_mode == NULL) {
5436                 /*
5437                  * This may not be an error: the use case is when we have no
5438                  * userspace calls to reset and set the mode upon hotplug. In
5439                  * that case we call set mode ourselves to restore the previous
5440                  * mode, and the mode list may not be filled in yet.
5441                  */
5442                 DRM_DEBUG_DRIVER("No preferred mode found\n");
5443         } else {
5444                 recalculate_timing |= amdgpu_freesync_vid_mode &&
5445                                  is_freesync_video_mode(&mode, aconnector);
5446                 if (recalculate_timing) {
5447                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
5448                         saved_mode = mode;
5449                         mode = *freesync_mode;
5450                 } else {
5451                         decide_crtc_timing_for_drm_display_mode(
5452                                 &mode, preferred_mode,
5453                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5454                 }
5455
5456                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5457         }
5458
5459         if (recalculate_timing)
5460                 drm_mode_set_crtcinfo(&saved_mode, 0);
5461         else
5462                 drm_mode_set_crtcinfo(&mode, 0);
5463
5464         /*
5465          * If scaling is enabled and the refresh rate didn't change,
5466          * copy the VIC and polarities from the old timings.
5467          */
5468         if (!recalculate_timing || mode_refresh != preferred_refresh)
5469                 fill_stream_properties_from_drm_display_mode(
5470                         stream, &mode, &aconnector->base, con_state, NULL,
5471                         requested_bpc);
5472         else
5473                 fill_stream_properties_from_drm_display_mode(
5474                         stream, &mode, &aconnector->base, con_state, old_stream,
5475                         requested_bpc);
5476
5477         stream->timing.flags.DSC = 0;
5478
5479         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5480 #if defined(CONFIG_DRM_AMD_DC_DCN)
5481                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5482                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5483                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5484                                       &dsc_caps);
5485                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5486                                                              dc_link_get_link_cap(aconnector->dc_link));
5487
5488                 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5489                         /* Set DSC policy according to dsc_clock_en */
5490                         dc_dsc_policy_set_enable_dsc_when_not_needed(
5491                                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5492
5493                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5494                                                   &dsc_caps,
5495                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5496                                                   0,
5497                                                   link_bandwidth_kbps,
5498                                                   &stream->timing,
5499                                                   &stream->timing.dsc_cfg))
5500                                 stream->timing.flags.DSC = 1;
5501                         /* Overwrite the stream flag if DSC is enabled through debugfs */
5502                         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5503                                 stream->timing.flags.DSC = 1;
5504
5505                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5506                                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5507
5508                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5509                                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5510
5511                         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5512                                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5513                 }
5514 #endif
5515         }
5516
5517         update_stream_scaling_settings(&mode, dm_state, stream);
5518
5519         fill_audio_info(
5520                 &stream->audio_info,
5521                 drm_connector,
5522                 sink);
5523
5524         update_stream_signal(stream, sink);
5525
5526         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5527                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5528
5529         if (stream->link->psr_settings.psr_feature_enabled) {
5530                 /*
5531                  * Decide whether the stream supports the VSC SDP colorimetry
5532                  * capability before building the VSC info packet.
5533                  */
5534                 stream->use_vsc_sdp_for_colorimetry = false;
5535                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5536                         stream->use_vsc_sdp_for_colorimetry =
5537                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5538                 } else {
5539                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5540                                 stream->use_vsc_sdp_for_colorimetry = true;
5541                 }
5542                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5543         }
5544 finish:
5545         dc_sink_release(sink);
5546
5547         return stream;
5548 }
5549
5550 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5551 {
5552         drm_crtc_cleanup(crtc);
5553         kfree(crtc);
5554 }
5555
5556 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5557                                   struct drm_crtc_state *state)
5558 {
5559         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5560
5561         /* TODO: Destroy dc_stream objects once the stream object is flattened */
5562         if (cur->stream)
5563                 dc_stream_release(cur->stream);
5564
5566         __drm_atomic_helper_crtc_destroy_state(state);
5567
5569         kfree(state);
5570 }
5571
5572 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5573 {
5574         struct dm_crtc_state *state;
5575
5576         if (crtc->state)
5577                 dm_crtc_destroy_state(crtc, crtc->state);
5578
5579         state = kzalloc(sizeof(*state), GFP_KERNEL);
5580         if (WARN_ON(!state))
5581                 return;
5582
5583         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5584 }
5585
5586 static struct drm_crtc_state *
5587 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5588 {
5589         struct dm_crtc_state *state, *cur;
5590
5591         if (WARN_ON(!crtc->state))
5592                 return NULL;
5593
5594         cur = to_dm_crtc_state(crtc->state);
5595
5596         state = kzalloc(sizeof(*state), GFP_KERNEL);
5597         if (!state)
5598                 return NULL;
5599
5600         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5601
5602         if (cur->stream) {
5603                 state->stream = cur->stream;
5604                 dc_stream_retain(state->stream);
5605         }
5606
5607         state->active_planes = cur->active_planes;
5608         state->vrr_infopacket = cur->vrr_infopacket;
5609         state->abm_level = cur->abm_level;
5610         state->vrr_supported = cur->vrr_supported;
5611         state->freesync_config = cur->freesync_config;
5612         state->cm_has_degamma = cur->cm_has_degamma;
5613         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5614         /* TODO: Duplicate the dc_stream once the stream object is flattened */
5615
5616         return &state->base;
5617 }
5618
5619 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
5620 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
5621 {
5622         crtc_debugfs_init(crtc);
5623
5624         return 0;
5625 }
5626 #endif
5627
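/*
 * Enable or disable the VUPDATE interrupt for the CRTC's OTG instance.
 * Returns 0 on success and -EBUSY when DC rejects the request.
 */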
5628 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5629 {
5630         enum dc_irq_source irq_source;
5631         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5632         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5633         int rc;
5634
5635         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5636
5637         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5638
5639         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5640                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5641         return rc;
5642 }
5643
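/*
 * Enable or disable the VBLANK interrupt for a CRTC. VUPDATE is only kept
 * enabled alongside VBLANK while VRR is active, since that is the only case
 * that needs it. On DCN the MALL work item is also scheduled to propagate
 * the new vblank state, except while the GPU is in reset.
 */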
5644 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5645 {
5646         enum dc_irq_source irq_source;
5647         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5648         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5649         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5650 #if defined(CONFIG_DRM_AMD_DC_DCN)
5651         struct amdgpu_display_manager *dm = &adev->dm;
5652         unsigned long flags;
5653 #endif
5654         int rc = 0;
5655
5656         if (enable) {
5657                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5658                 if (amdgpu_dm_vrr_active(acrtc_state))
5659                         rc = dm_set_vupdate_irq(crtc, true);
5660         } else {
5661                 /* vblank irq off -> vupdate irq off */
5662                 rc = dm_set_vupdate_irq(crtc, false);
5663         }
5664
5665         if (rc)
5666                 return rc;
5667
5668         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5669
5670         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5671                 return -EBUSY;
5672
5673         if (amdgpu_in_reset(adev))
5674                 return 0;
5675
5676 #if defined(CONFIG_DRM_AMD_DC_DCN)
5677         spin_lock_irqsave(&dm->vblank_lock, flags);
5678         dm->vblank_workqueue->dm = dm;
5679         dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
5680         dm->vblank_workqueue->enable = enable;
5681         spin_unlock_irqrestore(&dm->vblank_lock, flags);
5682         schedule_work(&dm->vblank_workqueue->mall_work);
5683 #endif
5684
5685         return 0;
5686 }
5687
5688 static int dm_enable_vblank(struct drm_crtc *crtc)
5689 {
5690         return dm_set_vblank(crtc, true);
5691 }
5692
5693 static void dm_disable_vblank(struct drm_crtc *crtc)
5694 {
5695         dm_set_vblank(crtc, false);
5696 }
5697
5698 /* Only the options currently available to the driver are implemented */
5699 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5700         .reset = dm_crtc_reset_state,
5701         .destroy = amdgpu_dm_crtc_destroy,
5702         .set_config = drm_atomic_helper_set_config,
5703         .page_flip = drm_atomic_helper_page_flip,
5704         .atomic_duplicate_state = dm_crtc_duplicate_state,
5705         .atomic_destroy_state = dm_crtc_destroy_state,
5706         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5707         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5708         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5709         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5710         .enable_vblank = dm_enable_vblank,
5711         .disable_vblank = dm_disable_vblank,
5712         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5713 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
5714         .late_register = amdgpu_dm_crtc_late_register,
5715 #endif
5716 };
5717
5718 static enum drm_connector_status
5719 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5720 {
5721         bool connected;
5722         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5723
5724         /*
5725          * Notes:
5726          * 1. This interface is NOT called in the context of the HPD irq.
5727          * 2. This interface *is called* in the context of a user-mode ioctl,
5728          * which makes it a bad place for *any* MST-related activity.
5729          */
5730
5731         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5732             !aconnector->fake_enable)
5733                 connected = (aconnector->dc_sink != NULL);
5734         else
5735                 connected = (aconnector->base.force == DRM_FORCE_ON);
5736
5737         update_subconnector_property(aconnector);
5738
5739         return (connected ? connector_status_connected :
5740                         connector_status_disconnected);
5741 }
5742
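/*
 * Translate writes of the scaling mode, underscan and ABM level connector
 * properties into the dm_connector_state fields consumed at commit time.
 */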
5743 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5744                                             struct drm_connector_state *connector_state,
5745                                             struct drm_property *property,
5746                                             uint64_t val)
5747 {
5748         struct drm_device *dev = connector->dev;
5749         struct amdgpu_device *adev = drm_to_adev(dev);
5750         struct dm_connector_state *dm_old_state =
5751                 to_dm_connector_state(connector->state);
5752         struct dm_connector_state *dm_new_state =
5753                 to_dm_connector_state(connector_state);
5754
5755         int ret = -EINVAL;
5756
5757         if (property == dev->mode_config.scaling_mode_property) {
5758                 enum amdgpu_rmx_type rmx_type;
5759
5760                 switch (val) {
5761                 case DRM_MODE_SCALE_CENTER:
5762                         rmx_type = RMX_CENTER;
5763                         break;
5764                 case DRM_MODE_SCALE_ASPECT:
5765                         rmx_type = RMX_ASPECT;
5766                         break;
5767                 case DRM_MODE_SCALE_FULLSCREEN:
5768                         rmx_type = RMX_FULL;
5769                         break;
5770                 case DRM_MODE_SCALE_NONE:
5771                 default:
5772                         rmx_type = RMX_OFF;
5773                         break;
5774                 }
5775
5776                 if (dm_old_state->scaling == rmx_type)
5777                         return 0;
5778
5779                 dm_new_state->scaling = rmx_type;
5780                 ret = 0;
5781         } else if (property == adev->mode_info.underscan_hborder_property) {
5782                 dm_new_state->underscan_hborder = val;
5783                 ret = 0;
5784         } else if (property == adev->mode_info.underscan_vborder_property) {
5785                 dm_new_state->underscan_vborder = val;
5786                 ret = 0;
5787         } else if (property == adev->mode_info.underscan_property) {
5788                 dm_new_state->underscan_enable = val;
5789                 ret = 0;
5790         } else if (property == adev->mode_info.abm_level_property) {
5791                 dm_new_state->abm_level = val;
5792                 ret = 0;
5793         }
5794
5795         return ret;
5796 }
5797
5798 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5799                                             const struct drm_connector_state *state,
5800                                             struct drm_property *property,
5801                                             uint64_t *val)
5802 {
5803         struct drm_device *dev = connector->dev;
5804         struct amdgpu_device *adev = drm_to_adev(dev);
5805         struct dm_connector_state *dm_state =
5806                 to_dm_connector_state(state);
5807         int ret = -EINVAL;
5808
5809         if (property == dev->mode_config.scaling_mode_property) {
5810                 switch (dm_state->scaling) {
5811                 case RMX_CENTER:
5812                         *val = DRM_MODE_SCALE_CENTER;
5813                         break;
5814                 case RMX_ASPECT:
5815                         *val = DRM_MODE_SCALE_ASPECT;
5816                         break;
5817                 case RMX_FULL:
5818                         *val = DRM_MODE_SCALE_FULLSCREEN;
5819                         break;
5820                 case RMX_OFF:
5821                 default:
5822                         *val = DRM_MODE_SCALE_NONE;
5823                         break;
5824                 }
5825                 ret = 0;
5826         } else if (property == adev->mode_info.underscan_hborder_property) {
5827                 *val = dm_state->underscan_hborder;
5828                 ret = 0;
5829         } else if (property == adev->mode_info.underscan_vborder_property) {
5830                 *val = dm_state->underscan_vborder;
5831                 ret = 0;
5832         } else if (property == adev->mode_info.underscan_property) {
5833                 *val = dm_state->underscan_enable;
5834                 ret = 0;
5835         } else if (property == adev->mode_info.abm_level_property) {
5836                 *val = dm_state->abm_level;
5837                 ret = 0;
5838         }
5839
5840         return ret;
5841 }
5842
5843 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5844 {
5845         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5846
5847         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5848 }
5849
5850 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5851 {
5852         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5853         const struct dc_link *link = aconnector->dc_link;
5854         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5855         struct amdgpu_display_manager *dm = &adev->dm;
5856
5857         /*
5858          * Call only if mst_mgr was initialized before, since it's not done
5859          * for all connector types.
5860          */
5861         if (aconnector->mst_mgr.dev)
5862                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5863
5864 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5865         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5866
5867         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5868             link->type != dc_connection_none &&
5869             dm->backlight_dev) {
5870                 backlight_device_unregister(dm->backlight_dev);
5871                 dm->backlight_dev = NULL;
5872         }
5873 #endif
5874
5875         if (aconnector->dc_em_sink)
5876                 dc_sink_release(aconnector->dc_em_sink);
5877         aconnector->dc_em_sink = NULL;
5878         if (aconnector->dc_sink)
5879                 dc_sink_release(aconnector->dc_sink);
5880         aconnector->dc_sink = NULL;
5881
5882         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5883         drm_connector_unregister(connector);
5884         drm_connector_cleanup(connector);
5885         if (aconnector->i2c) {
5886                 i2c_del_adapter(&aconnector->i2c->base);
5887                 kfree(aconnector->i2c);
5888         }
5889         kfree(aconnector->dm_dp_aux.aux.name);
5890
5891         kfree(connector);
5892 }
5893
5894 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5895 {
5896         struct dm_connector_state *state =
5897                 to_dm_connector_state(connector->state);
5898
5899         if (connector->state)
5900                 __drm_atomic_helper_connector_destroy_state(connector->state);
5901
5902         kfree(state);
5903
5904         state = kzalloc(sizeof(*state), GFP_KERNEL);
5905
5906         if (state) {
5907                 state->scaling = RMX_OFF;
5908                 state->underscan_enable = false;
5909                 state->underscan_hborder = 0;
5910                 state->underscan_vborder = 0;
5911                 state->base.max_requested_bpc = 8;
5912                 state->vcpi_slots = 0;
5913                 state->pbn = 0;
5914                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5915                         state->abm_level = amdgpu_dm_abm_level;
5916
5917                 __drm_atomic_helper_connector_reset(connector, &state->base);
5918         }
5919 }
5920
5921 struct drm_connector_state *
5922 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5923 {
5924         struct dm_connector_state *state =
5925                 to_dm_connector_state(connector->state);
5926
5927         struct dm_connector_state *new_state =
5928                         kmemdup(state, sizeof(*state), GFP_KERNEL);
5929
5930         if (!new_state)
5931                 return NULL;
5932
5933         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5934
5935         new_state->freesync_capable = state->freesync_capable;
5936         new_state->abm_level = state->abm_level;
5937         new_state->scaling = state->scaling;
5938         new_state->underscan_enable = state->underscan_enable;
5939         new_state->underscan_hborder = state->underscan_hborder;
5940         new_state->underscan_vborder = state->underscan_vborder;
5941         new_state->vcpi_slots = state->vcpi_slots;
5942         new_state->pbn = state->pbn;
5943         return &new_state->base;
5944 }
5945
5946 static int
5947 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5948 {
5949         struct amdgpu_dm_connector *amdgpu_dm_connector =
5950                 to_amdgpu_dm_connector(connector);
5951         int r;
5952
5953         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5954             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5955                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5956                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5957                 if (r)
5958                         return r;
5959         }
5960
5961 #if defined(CONFIG_DEBUG_FS)
5962         connector_debugfs_init(amdgpu_dm_connector);
5963 #endif
5964
5965         return 0;
5966 }
5967
5968 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5969         .reset = amdgpu_dm_connector_funcs_reset,
5970         .detect = amdgpu_dm_connector_detect,
5971         .fill_modes = drm_helper_probe_single_connector_modes,
5972         .destroy = amdgpu_dm_connector_destroy,
5973         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5974         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5975         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5976         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5977         .late_register = amdgpu_dm_connector_late_register,
5978         .early_unregister = amdgpu_dm_connector_unregister
5979 };
5980
5981 static int get_modes(struct drm_connector *connector)
5982 {
5983         return amdgpu_dm_connector_get_modes(connector);
5984 }
5985
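/*
 * Create an emulated sink from the connector's overridden EDID blob. When
 * no EDID blob is present the connector is forced off instead.
 */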
5986 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5987 {
5988         struct dc_sink_init_data init_params = {
5989                         .link = aconnector->dc_link,
5990                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5991         };
5992         struct edid *edid;
5993
5994         if (!aconnector->base.edid_blob_ptr) {
5995                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
5996                                 aconnector->base.name);
5997
5998                 aconnector->base.force = DRM_FORCE_OFF;
5999                 aconnector->base.override_edid = false;
6000                 return;
6001         }
6002
6003         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6004
6005         aconnector->edid = edid;
6006
6007         aconnector->dc_em_sink = dc_link_add_remote_sink(
6008                 aconnector->dc_link,
6009                 (uint8_t *)edid,
6010                 (edid->extensions + 1) * EDID_LENGTH,
6011                 &init_params);
6012
6013         if (aconnector->base.force == DRM_FORCE_ON) {
6014                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6015                                 aconnector->dc_link->local_sink :
6016                                 aconnector->dc_em_sink;
6017                 dc_sink_retain(aconnector->dc_sink);
6018         }
6019 }
6020
6021 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6022 {
6023         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6024
6025         /*
6026          * In case of a headless boot with force-on for a DP-managed connector,
6027          * these settings have to be != 0 to get an initial modeset.
6028          */
6029         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6030                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6031                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6032         }
6033
6035         aconnector->base.override_edid = true;
6036         create_eml_sink(aconnector);
6037 }
6038
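/*
 * Create a stream and validate it against DC. On validation failure the
 * requested bpc is lowered in steps of 2, down to a floor of 6, before
 * giving up, e.g. for modes the link cannot carry at full depth.
 */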
6039 static struct dc_stream_state *
6040 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6041                                 const struct drm_display_mode *drm_mode,
6042                                 const struct dm_connector_state *dm_state,
6043                                 const struct dc_stream_state *old_stream)
6044 {
6045         struct drm_connector *connector = &aconnector->base;
6046         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6047         struct dc_stream_state *stream;
6048         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6049         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6050         enum dc_status dc_result = DC_OK;
6051
6052         do {
6053                 stream = create_stream_for_sink(aconnector, drm_mode,
6054                                                 dm_state, old_stream,
6055                                                 requested_bpc);
6056                 if (stream == NULL) {
6057                         DRM_ERROR("Failed to create stream for sink!\n");
6058                         break;
6059                 }
6060
6061                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6062
6063                 if (dc_result != DC_OK) {
6064                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6065                                       drm_mode->hdisplay,
6066                                       drm_mode->vdisplay,
6067                                       drm_mode->clock,
6068                                       dc_result,
6069                                       dc_status_to_str(dc_result));
6070
6071                         dc_stream_release(stream);
6072                         stream = NULL;
6073                         requested_bpc -= 2; /* lower bpc to retry validation */
6074                 }
6075
6076         } while (stream == NULL && requested_bpc >= 6);
6077
6078         return stream;
6079 }
6080
6081 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6082                                    struct drm_display_mode *mode)
6083 {
6084         int result = MODE_ERROR;
6085         struct dc_sink *dc_sink;
6086         /* TODO: Unhardcode stream count */
6087         struct dc_stream_state *stream;
6088         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6089
6090         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6091                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6092                 return result;
6093
6094         /*
6095          * Only run this the first time mode_valid is called, to initialize
6096          * EDID management.
6097          */
6098         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6099                 !aconnector->dc_em_sink)
6100                 handle_edid_mgmt(aconnector);
6101
6102         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6103
6104         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6105                                 aconnector->base.force != DRM_FORCE_ON) {
6106                 DRM_ERROR("dc_sink is NULL!\n");
6107                 goto fail;
6108         }
6109
6110         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6111         if (stream) {
6112                 dc_stream_release(stream);
6113                 result = MODE_OK;
6114         }
6115
6116 fail:
6117         /* TODO: error handling */
6118         return result;
6119 }
6120
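/*
 * Pack the connector's HDR output metadata into a DC info packet. HDMI
 * carries it as a Dynamic Range and Mastering infoframe, while DP/eDP
 * wraps the same 26-byte static metadata payload in an SDP header.
 */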
6121 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6122                                 struct dc_info_packet *out)
6123 {
6124         struct hdmi_drm_infoframe frame;
6125         unsigned char buf[30]; /* 26 + 4 */
6126         ssize_t len;
6127         int ret, i;
6128
6129         memset(out, 0, sizeof(*out));
6130
6131         if (!state->hdr_output_metadata)
6132                 return 0;
6133
6134         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6135         if (ret)
6136                 return ret;
6137
6138         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6139         if (len < 0)
6140                 return (int)len;
6141
6142         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6143         if (len != 30)
6144                 return -EINVAL;
6145
6146         /* Prepare the infopacket for DC. */
6147         switch (state->connector->connector_type) {
6148         case DRM_MODE_CONNECTOR_HDMIA:
6149                 out->hb0 = 0x87; /* type */
6150                 out->hb1 = 0x01; /* version */
6151                 out->hb2 = 0x1A; /* length */
6152                 out->sb[0] = buf[3]; /* checksum */
6153                 i = 1;
6154                 break;
6155
6156         case DRM_MODE_CONNECTOR_DisplayPort:
6157         case DRM_MODE_CONNECTOR_eDP:
6158                 out->hb0 = 0x00; /* sdp id, zero */
6159                 out->hb1 = 0x87; /* type */
6160                 out->hb2 = 0x1D; /* payload len - 1 */
6161                 out->hb3 = (0x13 << 2); /* sdp version */
6162                 out->sb[0] = 0x01; /* version */
6163                 out->sb[1] = 0x1A; /* length */
6164                 i = 2;
6165                 break;
6166
6167         default:
6168                 return -EINVAL;
6169         }
6170
6171         memcpy(&out->sb[i], &buf[4], 26);
6172         out->valid = true;
6173
6174         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6175                        sizeof(out->sb), false);
6176
6177         return 0;
6178 }
6179
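/* Report whether the HDR output metadata blob differs between two states. */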
6180 static bool
6181 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6182                           const struct drm_connector_state *new_state)
6183 {
6184         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6185         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6186
6187         if (old_blob != new_blob) {
6188                 if (old_blob && new_blob &&
6189                     old_blob->length == new_blob->length)
6190                         return memcmp(old_blob->data, new_blob->data,
6191                                       old_blob->length);
6192
6193                 return true;
6194         }
6195
6196         return false;
6197 }
6198
6199 static int
6200 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6201                                  struct drm_atomic_state *state)
6202 {
6203         struct drm_connector_state *new_con_state =
6204                 drm_atomic_get_new_connector_state(state, conn);
6205         struct drm_connector_state *old_con_state =
6206                 drm_atomic_get_old_connector_state(state, conn);
6207         struct drm_crtc *crtc = new_con_state->crtc;
6208         struct drm_crtc_state *new_crtc_state;
6209         int ret;
6210
6211         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6212
6213         if (!crtc)
6214                 return 0;
6215
6216         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6217                 struct dc_info_packet hdr_infopacket;
6218
6219                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6220                 if (ret)
6221                         return ret;
6222
6223                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6224                 if (IS_ERR(new_crtc_state))
6225                         return PTR_ERR(new_crtc_state);
6226
6227                 /*
6228                  * DC considers the stream backends changed if the
6229                  * static metadata changes. Forcing the modeset also
6230                  * gives a simple way for userspace to switch from
6231                  * 8bpc to 10bpc when setting the metadata to enter
6232                  * or exit HDR.
6233                  *
6234                  * Changing the static metadata after it's been
6235                  * set is permissible, however. So only force a
6236                  * modeset if we're entering or exiting HDR.
6237                  */
6238                 new_crtc_state->mode_changed =
6239                         !old_con_state->hdr_output_metadata ||
6240                         !new_con_state->hdr_output_metadata;
6241         }
6242
6243         return 0;
6244 }
6245
6246 static const struct drm_connector_helper_funcs
6247 amdgpu_dm_connector_helper_funcs = {
6248         /*
6249          * If hotplugging a second, bigger display in FB console mode, bigger
6250          * resolution modes will be filtered out by drm_mode_validate_size(),
6251          * and those modes are missing after the user starts lightdm. So we
6252          * need to renew the mode list in the get_modes callback, not just
6253          * return the mode count.
6253          */
6254         .get_modes = get_modes,
6255         .mode_valid = amdgpu_dm_connector_mode_valid,
6256         .atomic_check = amdgpu_dm_connector_atomic_check,
6257 };
6258
6259 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6260 {
6261 }
6262
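/*
 * Count the non-cursor planes enabled on a CRTC. Planes that carry no new
 * state in this commit previously passed validation and thus still count
 * as enabled.
 */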
6263 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6264 {
6265         struct drm_atomic_state *state = new_crtc_state->state;
6266         struct drm_plane *plane;
6267         int num_active = 0;
6268
6269         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6270                 struct drm_plane_state *new_plane_state;
6271
6272                 /* Cursor planes are "fake". */
6273                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6274                         continue;
6275
6276                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6277
6278                 if (!new_plane_state) {
6279                         /*
6280                          * The plane is enabled on the CRTC and hasn't changed
6281                          * state. This means that it previously passed
6282                          * validation and is therefore enabled.
6283                          */
6284                         num_active += 1;
6285                         continue;
6286                 }
6287
6288                 /* We need a framebuffer to be considered enabled. */
6289                 num_active += (new_plane_state->fb != NULL);
6290         }
6291
6292         return num_active;
6293 }
6294
6295 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6296                                          struct drm_crtc_state *new_crtc_state)
6297 {
6298         struct dm_crtc_state *dm_new_crtc_state =
6299                 to_dm_crtc_state(new_crtc_state);
6300
6301         dm_new_crtc_state->active_planes = 0;
6302
6303         if (!dm_new_crtc_state->stream)
6304                 return;
6305
6306         dm_new_crtc_state->active_planes =
6307                 count_crtc_active_planes(new_crtc_state);
6308 }
6309
6310 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6311                                        struct drm_atomic_state *state)
6312 {
6313         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6314                                                                           crtc);
6315         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6316         struct dc *dc = adev->dm.dc;
6317         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6318         int ret = -EINVAL;
6319
6320         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6321
6322         dm_update_crtc_active_planes(crtc, crtc_state);
6323
6324         if (unlikely(!dm_crtc_state->stream &&
6325                      modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6326                 WARN_ON(1);
6327                 return ret;
6328         }
6329
6330         /*
6331          * We require the primary plane to be enabled whenever the CRTC is, otherwise
6332          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6333          * planes are disabled, which is not supported by the hardware. And there is legacy
6334          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6335          */
6336         if (crtc_state->enable &&
6337             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6338                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6339                 return -EINVAL;
6340         }
6341
6342         /* In some use cases, like reset, no stream is attached */
6343         if (!dm_crtc_state->stream)
6344                 return 0;
6345
6346         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6347                 return 0;
6348
6349         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6350         return ret;
6351 }
6352
6353 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6354                                       const struct drm_display_mode *mode,
6355                                       struct drm_display_mode *adjusted_mode)
6356 {
6357         return true;
6358 }
6359
6360 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6361         .disable = dm_crtc_helper_disable,
6362         .atomic_check = dm_crtc_helper_atomic_check,
6363         .mode_fixup = dm_crtc_helper_mode_fixup,
6364         .get_scanout_position = amdgpu_crtc_get_scanout_position,
6365 };
6366
6367 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6368 {
6370 }
6371
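/* Map a DC color depth to bits per component; returns 0 when unknown. */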
6372 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6373 {
6374         switch (display_color_depth) {
6375         case COLOR_DEPTH_666:
6376                 return 6;
6377         case COLOR_DEPTH_888:
6378                 return 8;
6379         case COLOR_DEPTH_101010:
6380                 return 10;
6381         case COLOR_DEPTH_121212:
6382                 return 12;
6383         case COLOR_DEPTH_141414:
6384                 return 14;
6385         case COLOR_DEPTH_161616:
6386                 return 16;
6387         default:
6388                 break;
6389         }
6390         return 0;
6391 }
6392
6393 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6394                                           struct drm_crtc_state *crtc_state,
6395                                           struct drm_connector_state *conn_state)
6396 {
6397         struct drm_atomic_state *state = crtc_state->state;
6398         struct drm_connector *connector = conn_state->connector;
6399         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6400         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6401         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6402         struct drm_dp_mst_topology_mgr *mst_mgr;
6403         struct drm_dp_mst_port *mst_port;
6404         enum dc_color_depth color_depth;
6405         int clock, bpp = 0;
6406         bool is_y420 = false;
6407
6408         if (!aconnector->port || !aconnector->dc_sink)
6409                 return 0;
6410
6411         mst_port = aconnector->port;
6412         mst_mgr = &aconnector->mst_port->mst_mgr;
6413
6414         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6415                 return 0;
6416
6417         if (!state->duplicated) {
6418                 int max_bpc = conn_state->max_requested_bpc;
6419                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6420                                 aconnector->force_yuv420_output;
6421                 color_depth = convert_color_depth_from_display_info(connector,
6422                                                                     is_y420,
6423                                                                     max_bpc);
6424                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6425                 clock = adjusted_mode->clock;
6426                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6427         }
6428         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6429                                                                            mst_mgr,
6430                                                                            mst_port,
6431                                                                            dm_new_connector_state->pbn,
6432                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
6433         if (dm_new_connector_state->vcpi_slots < 0) {
6434                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6435                 return dm_new_connector_state->vcpi_slots;
6436         }
6437         return 0;
6438 }
6439
6440 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6441         .disable = dm_encoder_helper_disable,
6442         .atomic_check = dm_encoder_helper_atomic_check
6443 };
6444
6445 #if defined(CONFIG_DRM_AMD_DC_DCN)
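/*
 * Recompute the PBN and VCPI slot allocation of each MST stream once DSC
 * decisions are known: DSC-enabled streams are sized from the compressed
 * bits_per_pixel, while DSC is disabled on the remaining MST ports.
 */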
6446 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6447                                             struct dc_state *dc_state)
6448 {
6449         struct dc_stream_state *stream = NULL;
6450         struct drm_connector *connector;
6451         struct drm_connector_state *new_con_state, *old_con_state;
6452         struct amdgpu_dm_connector *aconnector;
6453         struct dm_connector_state *dm_conn_state;
6454         int i, j, clock, bpp;
6455         int vcpi, pbn_div, pbn = 0;
6456
6457         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6458
6459                 aconnector = to_amdgpu_dm_connector(connector);
6460
6461                 if (!aconnector->port)
6462                         continue;
6463
6464                 if (!new_con_state || !new_con_state->crtc)
6465                         continue;
6466
6467                 dm_conn_state = to_dm_connector_state(new_con_state);
6468
6469                 for (j = 0; j < dc_state->stream_count; j++) {
6470                         stream = dc_state->streams[j];
6471                         if (!stream)
6472                                 continue;
6473
6474                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6475                                 break;
6476
6477                         stream = NULL;
6478                 }
6479
6480                 if (!stream)
6481                         continue;
6482
6483                 if (stream->timing.flags.DSC != 1) {
6484                         drm_dp_mst_atomic_enable_dsc(state,
6485                                                      aconnector->port,
6486                                                      dm_conn_state->pbn,
6487                                                      0,
6488                                                      false);
6489                         continue;
6490                 }
6491
6492                 pbn_div = dm_mst_get_pbn_divider(stream->link);
6493                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6494                 clock = stream->timing.pix_clk_100hz / 10;
6495                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6496                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6497                                                     aconnector->port,
6498                                                     pbn, pbn_div,
6499                                                     true);
6500                 if (vcpi < 0)
6501                         return vcpi;
6502
6503                 dm_conn_state->pbn = pbn;
6504                 dm_conn_state->vcpi_slots = vcpi;
6505         }
6506         return 0;
6507 }
6508 #endif
6509
6510 static void dm_drm_plane_reset(struct drm_plane *plane)
6511 {
6512         struct dm_plane_state *amdgpu_state = NULL;
6513
6514         if (plane->state)
6515                 plane->funcs->atomic_destroy_state(plane, plane->state);
6516
6517         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6518         WARN_ON(amdgpu_state == NULL);
6519
6520         if (amdgpu_state)
6521                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6522 }
6523
6524 static struct drm_plane_state *
6525 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6526 {
6527         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6528
6529         old_dm_plane_state = to_dm_plane_state(plane->state);
6530         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6531         if (!dm_plane_state)
6532                 return NULL;
6533
6534         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6535
6536         if (old_dm_plane_state->dc_state) {
6537                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6538                 dc_plane_state_retain(dm_plane_state->dc_state);
6539         }
6540
6541         return &dm_plane_state->base;
6542 }
6543
6544 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6545                                 struct drm_plane_state *state)
6546 {
6547         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6548
6549         if (dm_plane_state->dc_state)
6550                 dc_plane_state_release(dm_plane_state->dc_state);
6551
6552         drm_atomic_helper_plane_destroy_state(plane, state);
6553 }
6554
6555 static const struct drm_plane_funcs dm_plane_funcs = {
6556         .update_plane   = drm_atomic_helper_update_plane,
6557         .disable_plane  = drm_atomic_helper_disable_plane,
6558         .destroy        = drm_primary_helper_destroy,
6559         .reset = dm_drm_plane_reset,
6560         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6561         .atomic_destroy_state = dm_drm_plane_destroy_state,
6562         .format_mod_supported = dm_plane_format_mod_supported,
6563 };
6564
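/*
 * Pin the framebuffer BO into a displayable domain (always VRAM for cursor
 * planes), map it into GART and record its GPU address. Buffer attributes
 * are filled for newly created planes only, since the address is not known
 * at atomic check time and existing planes are already in use by DC.
 */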
6565 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6566                                       struct drm_plane_state *new_state)
6567 {
6568         struct amdgpu_framebuffer *afb;
6569         struct drm_gem_object *obj;
6570         struct amdgpu_device *adev;
6571         struct amdgpu_bo *rbo;
6572         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6573         struct list_head list;
6574         struct ttm_validate_buffer tv;
6575         struct ww_acquire_ctx ticket;
6576         uint32_t domain;
6577         int r;
6578
6579         if (!new_state->fb) {
6580                 DRM_DEBUG_DRIVER("No FB bound\n");
6581                 return 0;
6582         }
6583
6584         afb = to_amdgpu_framebuffer(new_state->fb);
6585         obj = new_state->fb->obj[0];
6586         rbo = gem_to_amdgpu_bo(obj);
6587         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6588         INIT_LIST_HEAD(&list);
6589
6590         tv.bo = &rbo->tbo;
6591         tv.num_shared = 1;
6592         list_add(&tv.head, &list);
6593
6594         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6595         if (r) {
6596                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6597                 return r;
6598         }
6599
6600         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6601                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6602         else
6603                 domain = AMDGPU_GEM_DOMAIN_VRAM;
6604
6605         r = amdgpu_bo_pin(rbo, domain);
6606         if (unlikely(r != 0)) {
6607                 if (r != -ERESTARTSYS)
6608                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6609                 ttm_eu_backoff_reservation(&ticket, &list);
6610                 return r;
6611         }
6612
6613         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6614         if (unlikely(r != 0)) {
6615                 amdgpu_bo_unpin(rbo);
6616                 ttm_eu_backoff_reservation(&ticket, &list);
6617                 DRM_ERROR("%p bind failed\n", rbo);
6618                 return r;
6619         }
6620
6621         ttm_eu_backoff_reservation(&ticket, &list);
6622
6623         afb->address = amdgpu_bo_gpu_offset(rbo);
6624
6625         amdgpu_bo_ref(rbo);
6626
6627         /*
6628          * We don't do surface updates on planes that have been newly created,
6629          * but we also don't have the afb->address during atomic check.
6630          *
6631          * Fill in buffer attributes depending on the address here, but only on
6632          * newly created planes since they're not being used by DC yet and this
6633          * won't modify global state.
6634          */
6635         dm_plane_state_old = to_dm_plane_state(plane->state);
6636         dm_plane_state_new = to_dm_plane_state(new_state);
6637
6638         if (dm_plane_state_new->dc_state &&
6639             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6640                 struct dc_plane_state *plane_state =
6641                         dm_plane_state_new->dc_state;
6642                 bool force_disable_dcc = !plane_state->dcc.enable;
6643
6644                 fill_plane_buffer_attributes(
6645                         adev, afb, plane_state->format, plane_state->rotation,
6646                         afb->tiling_flags,
6647                         &plane_state->tiling_info, &plane_state->plane_size,
6648                         &plane_state->dcc, &plane_state->address,
6649                         afb->tmz_surface, force_disable_dcc);
6650         }
6651
6652         return 0;
6653 }
6654
6655 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6656                                        struct drm_plane_state *old_state)
6657 {
6658         struct amdgpu_bo *rbo;
6659         int r;
6660
6661         if (!old_state->fb)
6662                 return;
6663
6664         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6665         r = amdgpu_bo_reserve(rbo, false);
6666         if (unlikely(r)) {
6667                 DRM_ERROR("failed to reserve rbo before unpin\n");
6668                 return;
6669         }
6670
6671         amdgpu_bo_unpin(rbo);
6672         amdgpu_bo_unreserve(rbo);
6673         amdgpu_bo_unref(&rbo);
6674 }
6675
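/*
 * Validate plane state against its CRTC: clip the viewport to the visible
 * area, enforce the minimum viewport size, and convert the DC scaling caps
 * (1.0 == 1000) into the 16.16 fixed-point limits that DRM expects.
 */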
6676 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6677                                        struct drm_crtc_state *new_crtc_state)
6678 {
6679         struct drm_framebuffer *fb = state->fb;
6680         int min_downscale, max_upscale;
6681         int min_scale = 0;
6682         int max_scale = INT_MAX;
6683
6684         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
6685         if (fb && state->crtc) {
6686                 /* Validate viewport to cover the case when only the position changes */
6687                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
6688                         int viewport_width = state->crtc_w;
6689                         int viewport_height = state->crtc_h;
6690
6691                         if (state->crtc_x < 0)
6692                                 viewport_width += state->crtc_x;
6693                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
6694                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
6695
6696                         if (state->crtc_y < 0)
6697                                 viewport_height += state->crtc_y;
6698                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
6699                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
6700
6701                         if (viewport_width < 0 || viewport_height < 0) {
6702                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
6703                                 return -EINVAL;
6704                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
6705                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
6706                                 return -EINVAL;
6707                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
6708                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
6709                                 return -EINVAL;
6710                         }
6711
6712                 }
6713
6714                 /* Get min/max allowed scaling factors from plane caps. */
6715                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6716                                              &min_downscale, &max_upscale);
6717                 /*
6718                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6719                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6720                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6721                  */
6722                 min_scale = (1000 << 16) / max_upscale;
6723                 max_scale = (1000 << 16) / min_downscale;
6724         }
6725
6726         return drm_atomic_helper_check_plane_state(
6727                 state, new_crtc_state, min_scale, max_scale, true, true);
6728 }
6729
6730 static int dm_plane_atomic_check(struct drm_plane *plane,
6731                                  struct drm_atomic_state *state)
6732 {
6733         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
6734                                                                                  plane);
6735         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6736         struct dc *dc = adev->dm.dc;
6737         struct dm_plane_state *dm_plane_state;
6738         struct dc_scaling_info scaling_info;
6739         struct drm_crtc_state *new_crtc_state;
6740         int ret;
6741
6742         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
6743
6744         dm_plane_state = to_dm_plane_state(new_plane_state);
6745
6746         if (!dm_plane_state->dc_state)
6747                 return 0;
6748
6749         new_crtc_state =
6750                 drm_atomic_get_new_crtc_state(state,
6751                                               new_plane_state->crtc);
6752         if (!new_crtc_state)
6753                 return -EINVAL;
6754
6755         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
6756         if (ret)
6757                 return ret;
6758
6759         ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
6760         if (ret)
6761                 return ret;
6762
6763         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6764                 return 0;
6765
6766         return -EINVAL;
6767 }
6768
6769 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6770                                        struct drm_atomic_state *state)
6771 {
6772         /* Only support async updates on cursor planes. */
6773         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6774                 return -EINVAL;
6775
6776         return 0;
6777 }
6778
6779 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6780                                          struct drm_atomic_state *state)
6781 {
6782         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
6783                                                                            plane);
6784         struct drm_plane_state *old_state =
6785                 drm_atomic_get_old_plane_state(state, plane);
6786
6787         trace_amdgpu_dm_atomic_update_cursor(new_state);
6788
6789         swap(plane->state->fb, new_state->fb);
6790
6791         plane->state->src_x = new_state->src_x;
6792         plane->state->src_y = new_state->src_y;
6793         plane->state->src_w = new_state->src_w;
6794         plane->state->src_h = new_state->src_h;
6795         plane->state->crtc_x = new_state->crtc_x;
6796         plane->state->crtc_y = new_state->crtc_y;
6797         plane->state->crtc_w = new_state->crtc_w;
6798         plane->state->crtc_h = new_state->crtc_h;
6799
6800         handle_cursor_update(plane, old_state);
6801 }
6802
6803 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6804         .prepare_fb = dm_plane_helper_prepare_fb,
6805         .cleanup_fb = dm_plane_helper_cleanup_fb,
6806         .atomic_check = dm_plane_atomic_check,
6807         .atomic_async_check = dm_plane_atomic_async_check,
6808         .atomic_async_update = dm_plane_atomic_async_update
6809 };
6810
6811 /*
6812  * TODO: these are currently initialized to rgb formats only.
6813  * For future use cases we should either initialize them dynamically based on
6814  * plane capabilities, or initialize this array to all formats, so internal drm
6815  * check will succeed, and let DC implement proper check
6816  */
6817 static const uint32_t rgb_formats[] = {
6818         DRM_FORMAT_XRGB8888,
6819         DRM_FORMAT_ARGB8888,
6820         DRM_FORMAT_RGBA8888,
6821         DRM_FORMAT_XRGB2101010,
6822         DRM_FORMAT_XBGR2101010,
6823         DRM_FORMAT_ARGB2101010,
6824         DRM_FORMAT_ABGR2101010,
6825         DRM_FORMAT_XBGR8888,
6826         DRM_FORMAT_ABGR8888,
6827         DRM_FORMAT_RGB565,
6828 };
6829
6830 static const uint32_t overlay_formats[] = {
6831         DRM_FORMAT_XRGB8888,
6832         DRM_FORMAT_ARGB8888,
6833         DRM_FORMAT_RGBA8888,
6834         DRM_FORMAT_XBGR8888,
6835         DRM_FORMAT_ABGR8888,
6836         DRM_FORMAT_RGB565
6837 };
6838
6839 static const u32 cursor_formats[] = {
6840         DRM_FORMAT_ARGB8888
6841 };
6842
6843 static int get_plane_formats(const struct drm_plane *plane,
6844                              const struct dc_plane_cap *plane_cap,
6845                              uint32_t *formats, int max_formats)
6846 {
6847         int i, num_formats = 0;
6848
6849         /*
6850          * TODO: Query support for each group of formats directly from
6851          * DC plane caps. This will require adding more formats to the
6852          * caps list.
6853          */
6854
6855         switch (plane->type) {
6856         case DRM_PLANE_TYPE_PRIMARY:
6857                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6858                         if (num_formats >= max_formats)
6859                                 break;
6860
6861                         formats[num_formats++] = rgb_formats[i];
6862                 }
6863
6864                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6865                         formats[num_formats++] = DRM_FORMAT_NV12;
6866                 if (plane_cap && plane_cap->pixel_format_support.p010)
6867                         formats[num_formats++] = DRM_FORMAT_P010;
6868                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6869                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6870                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6871                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6872                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6873                 }
6874                 break;
6875
6876         case DRM_PLANE_TYPE_OVERLAY:
6877                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6878                         if (num_formats >= max_formats)
6879                                 break;
6880
6881                         formats[num_formats++] = overlay_formats[i];
6882                 }
6883                 break;
6884
6885         case DRM_PLANE_TYPE_CURSOR:
6886                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6887                         if (num_formats >= max_formats)
6888                                 break;
6889
6890                         formats[num_formats++] = cursor_formats[i];
6891                 }
6892                 break;
6893         }
6894
6895         return num_formats;
6896 }
6897
6898 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6899                                 struct drm_plane *plane,
6900                                 unsigned long possible_crtcs,
6901                                 const struct dc_plane_cap *plane_cap)
6902 {
6903         uint32_t formats[32];
6904         int num_formats;
        int res;
6906         unsigned int supported_rotations;
6907         uint64_t *modifiers = NULL;
6908
6909         num_formats = get_plane_formats(plane, plane_cap, formats,
6910                                         ARRAY_SIZE(formats));
6911
6912         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6913         if (res)
6914                 return res;
6915
6916         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6917                                        &dm_plane_funcs, formats, num_formats,
6918                                        modifiers, plane->type, NULL);
6919         kfree(modifiers);
6920         if (res)
6921                 return res;
6922
6923         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6924             plane_cap && plane_cap->per_pixel_alpha) {
6925                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6926                                           BIT(DRM_MODE_BLEND_PREMULTI);
6927
6928                 drm_plane_create_alpha_property(plane);
6929                 drm_plane_create_blend_mode_property(plane, blend_caps);
6930         }
6931
6932         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6933             plane_cap &&
6934             (plane_cap->pixel_format_support.nv12 ||
6935              plane_cap->pixel_format_support.p010)) {
6936                 /* This only affects YUV formats. */
6937                 drm_plane_create_color_properties(
6938                         plane,
6939                         BIT(DRM_COLOR_YCBCR_BT601) |
6940                         BIT(DRM_COLOR_YCBCR_BT709) |
6941                         BIT(DRM_COLOR_YCBCR_BT2020),
6942                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6943                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6944                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6945         }
6946
6947         supported_rotations =
6948                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6949                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6950
6951         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6952             plane->type != DRM_PLANE_TYPE_CURSOR)
6953                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6954                                                    supported_rotations);
6955
6956         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6957
6958         /* Create (reset) the plane state */
6959         if (plane->funcs->reset)
6960                 plane->funcs->reset(plane);
6961
6962         return 0;
6963 }
6964
6965 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6966                                struct drm_plane *plane,
6967                                uint32_t crtc_index)
6968 {
6969         struct amdgpu_crtc *acrtc = NULL;
6970         struct drm_plane *cursor_plane;
6971
6972         int res = -ENOMEM;
6973
6974         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6975         if (!cursor_plane)
6976                 goto fail;
6977
6978         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

6981         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6982         if (!acrtc)
6983                 goto fail;
6984
6985         res = drm_crtc_init_with_planes(
6986                         dm->ddev,
6987                         &acrtc->base,
6988                         plane,
6989                         cursor_plane,
6990                         &amdgpu_dm_crtc_funcs, NULL);
6991
6992         if (res)
6993                 goto fail;
6994
6995         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6996
        /* Create (reset) the CRTC state */
6998         if (acrtc->base.funcs->reset)
6999                 acrtc->base.funcs->reset(&acrtc->base);
7000
7001         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7002         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7003
7004         acrtc->crtc_id = crtc_index;
7005         acrtc->base.enabled = false;
7006         acrtc->otg_inst = -1;
7007
7008         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7009         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7010                                    true, MAX_COLOR_LUT_ENTRIES);
7011         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7012
7013         return 0;
7014
7015 fail:
7016         kfree(acrtc);
7017         kfree(cursor_plane);
7018         return res;
7019 }
7020
7021
7022 static int to_drm_connector_type(enum signal_type st)
7023 {
7024         switch (st) {
7025         case SIGNAL_TYPE_HDMI_TYPE_A:
7026                 return DRM_MODE_CONNECTOR_HDMIA;
7027         case SIGNAL_TYPE_EDP:
7028                 return DRM_MODE_CONNECTOR_eDP;
7029         case SIGNAL_TYPE_LVDS:
7030                 return DRM_MODE_CONNECTOR_LVDS;
7031         case SIGNAL_TYPE_RGB:
7032                 return DRM_MODE_CONNECTOR_VGA;
7033         case SIGNAL_TYPE_DISPLAY_PORT:
7034         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7035                 return DRM_MODE_CONNECTOR_DisplayPort;
7036         case SIGNAL_TYPE_DVI_DUAL_LINK:
7037         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7038                 return DRM_MODE_CONNECTOR_DVID;
7039         case SIGNAL_TYPE_VIRTUAL:
7040                 return DRM_MODE_CONNECTOR_VIRTUAL;
7041
7042         default:
7043                 return DRM_MODE_CONNECTOR_Unknown;
7044         }
7045 }
7046
7047 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7048 {
7049         struct drm_encoder *encoder;
7050
7051         /* There is only one encoder per connector */
7052         drm_connector_for_each_possible_encoder(connector, encoder)
7053                 return encoder;
7054
7055         return NULL;
7056 }
7057
7058 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7059 {
7060         struct drm_encoder *encoder;
7061         struct amdgpu_encoder *amdgpu_encoder;
7062
7063         encoder = amdgpu_dm_connector_to_encoder(connector);
7064
7065         if (encoder == NULL)
7066                 return;
7067
7068         amdgpu_encoder = to_amdgpu_encoder(encoder);
7069
7070         amdgpu_encoder->native_mode.clock = 0;
7071
7072         if (!list_empty(&connector->probed_modes)) {
7073                 struct drm_display_mode *preferred_mode = NULL;
7074
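                /*
                 * probed_modes was sorted by the caller (see
                 * amdgpu_dm_connector_ddc_get_modes()), so the first entry
                 * should already be the preferred mode; hence the
                 * unconditional break below.
                 */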
7075                 list_for_each_entry(preferred_mode,
7076                                     &connector->probed_modes,
7077                                     head) {
7078                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7079                                 amdgpu_encoder->native_mode = *preferred_mode;
7080
7081                         break;
7082                 }
7083
7084         }
7085 }
7086
7087 static struct drm_display_mode *
7088 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7089                              char *name,
7090                              int hdisplay, int vdisplay)
7091 {
7092         struct drm_device *dev = encoder->dev;
7093         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7094         struct drm_display_mode *mode = NULL;
7095         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7096
7097         mode = drm_mode_duplicate(dev, native_mode);
7098
7099         if (mode == NULL)
7100                 return NULL;
7101
7102         mode->hdisplay = hdisplay;
7103         mode->vdisplay = vdisplay;
7104         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7105         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7106
        return mode;
}
7110
7111 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7112                                                  struct drm_connector *connector)
7113 {
7114         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7115         struct drm_display_mode *mode = NULL;
7116         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7117         struct amdgpu_dm_connector *amdgpu_dm_connector =
7118                                 to_amdgpu_dm_connector(connector);
7119         int i;
7120         int n;
7121         struct mode_size {
7122                 char name[DRM_DISPLAY_MODE_LEN];
7123                 int w;
7124                 int h;
7125         } common_modes[] = {
7126                 {  "640x480",  640,  480},
7127                 {  "800x600",  800,  600},
7128                 { "1024x768", 1024,  768},
7129                 { "1280x720", 1280,  720},
7130                 { "1280x800", 1280,  800},
7131                 {"1280x1024", 1280, 1024},
7132                 { "1440x900", 1440,  900},
7133                 {"1680x1050", 1680, 1050},
7134                 {"1600x1200", 1600, 1200},
7135                 {"1920x1080", 1920, 1080},
7136                 {"1920x1200", 1920, 1200}
7137         };
7138
7139         n = ARRAY_SIZE(common_modes);
7140
7141         for (i = 0; i < n; i++) {
7142                 struct drm_display_mode *curmode = NULL;
7143                 bool mode_existed = false;
7144
7145                 if (common_modes[i].w > native_mode->hdisplay ||
7146                     common_modes[i].h > native_mode->vdisplay ||
7147                    (common_modes[i].w == native_mode->hdisplay &&
7148                     common_modes[i].h == native_mode->vdisplay))
7149                         continue;
7150
7151                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7152                         if (common_modes[i].w == curmode->hdisplay &&
7153                             common_modes[i].h == curmode->vdisplay) {
7154                                 mode_existed = true;
7155                                 break;
7156                         }
7157                 }
7158
7159                 if (mode_existed)
7160                         continue;
7161
7162                 mode = amdgpu_dm_create_common_mode(encoder,
7163                                 common_modes[i].name, common_modes[i].w,
7164                                 common_modes[i].h);
                /* amdgpu_dm_create_common_mode() returns NULL on allocation failure */
                if (!mode)
                        continue;

                drm_mode_probed_add(connector, mode);
7166                 amdgpu_dm_connector->num_modes++;
7167         }
7168 }
7169
7170 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7171                                               struct edid *edid)
7172 {
7173         struct amdgpu_dm_connector *amdgpu_dm_connector =
7174                         to_amdgpu_dm_connector(connector);
7175
7176         if (edid) {
7177                 /* empty probed_modes */
7178                 INIT_LIST_HEAD(&connector->probed_modes);
7179                 amdgpu_dm_connector->num_modes =
7180                                 drm_add_edid_modes(connector, edid);
7181
                /* Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can have
                 * more than one preferred mode. Modes later in the
                 * probed-mode list may be of a higher, preferred
                 * resolution: for example, a 3840x2160 preferred timing
                 * in the base EDID and a 4096x2160 preferred resolution
                 * in a later DID extension block.
                 */
7190                 drm_mode_sort(&connector->probed_modes);
7191                 amdgpu_dm_get_native_mode(connector);
7192
7193                 /* Freesync capabilities are reset by calling
7194                  * drm_add_edid_modes() and need to be
7195                  * restored here.
7196                  */
7197                 amdgpu_dm_update_freesync_caps(connector, edid);
7198         } else {
7199                 amdgpu_dm_connector->num_modes = 0;
7200         }
7201 }
7202
7203 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7204                               struct drm_display_mode *mode)
7205 {
7206         struct drm_display_mode *m;
7207
        list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7209                 if (drm_mode_equal(m, mode))
7210                         return true;
7211         }
7212
7213         return false;
7214 }
7215
static unsigned int add_fs_modes(struct amdgpu_dm_connector *aconnector)
7217 {
7218         const struct drm_display_mode *m;
7219         struct drm_display_mode *new_mode;
        unsigned int i;
        unsigned int new_modes_count = 0;
7222
7223         /* Standard FPS values
7224          *
7225          * 23.976   - TV/NTSC
7226          * 24       - Cinema
7227          * 25       - TV/PAL
7228          * 29.97    - TV/NTSC
7229          * 30       - TV/NTSC
7230          * 48       - Cinema HFR
7231          * 50       - TV/PAL
7232          * 60       - Commonly used
7233          * 48,72,96 - Multiples of 24
7234          */
7235         const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000,
7236                                          48000, 50000, 60000, 72000, 96000 };
7237
7238         /*
7239          * Find mode with highest refresh rate with the same resolution
7240          * as the preferred mode. Some monitors report a preferred mode
7241          * with lower resolution than the highest refresh rate supported.
7242          */
7243
7244         m = get_highest_refresh_rate_mode(aconnector, true);
7245         if (!m)
7246                 return 0;
7247
7248         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7249                 uint64_t target_vtotal, target_vtotal_diff;
7250                 uint64_t num, den;
7251
7252                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7253                         continue;
7254
7255                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7256                     common_rates[i] > aconnector->max_vfreq * 1000)
7257                         continue;
7258
7259                 num = (unsigned long long)m->clock * 1000 * 1000;
7260                 den = common_rates[i] * (unsigned long long)m->htotal;
7261                 target_vtotal = div_u64(num, den);
7262                 target_vtotal_diff = target_vtotal - m->vtotal;
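                /*
                 * Worked example (standard CEA timing, for illustration):
                 * retiming 1920x1080@60 (clock = 148500 kHz, htotal = 2200,
                 * vtotal = 1125) to 48 Hz gives target_vtotal =
                 * 148500 * 1000 * 1000 / (48000 * 2200) = 1406, i.e. a
                 * target_vtotal_diff of 281 lines added to the front porch.
                 */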
7263
7264                 /* Check for illegal modes */
7265                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7266                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7267                     m->vtotal + target_vtotal_diff < m->vsync_end)
7268                         continue;
7269
7270                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7271                 if (!new_mode)
7272                         goto out;
7273
7274                 new_mode->vtotal += (u16)target_vtotal_diff;
7275                 new_mode->vsync_start += (u16)target_vtotal_diff;
7276                 new_mode->vsync_end += (u16)target_vtotal_diff;
7277                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7278                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7279
7280                 if (!is_duplicate_mode(aconnector, new_mode)) {
7281                         drm_mode_probed_add(&aconnector->base, new_mode);
7282                         new_modes_count += 1;
                } else {
                        drm_mode_destroy(aconnector->base.dev, new_mode);
                }
7285         }
7286  out:
7287         return new_modes_count;
7288 }
7289
7290 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7291                                                    struct edid *edid)
7292 {
7293         struct amdgpu_dm_connector *amdgpu_dm_connector =
7294                 to_amdgpu_dm_connector(connector);
7295
        if (!amdgpu_freesync_vid_mode || !edid)
                return;

7299         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7300                 amdgpu_dm_connector->num_modes +=
7301                         add_fs_modes(amdgpu_dm_connector);
7302 }
7303
7304 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7305 {
7306         struct amdgpu_dm_connector *amdgpu_dm_connector =
7307                         to_amdgpu_dm_connector(connector);
7308         struct drm_encoder *encoder;
7309         struct edid *edid = amdgpu_dm_connector->edid;
7310
7311         encoder = amdgpu_dm_connector_to_encoder(connector);
7312
7313         if (!drm_edid_is_valid(edid)) {
7314                 amdgpu_dm_connector->num_modes =
7315                                 drm_add_modes_noedid(connector, 640, 480);
7316         } else {
7317                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7318                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7319                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7320         }
7321         amdgpu_dm_fbc_init(connector);
7322
7323         return amdgpu_dm_connector->num_modes;
7324 }
7325
7326 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7327                                      struct amdgpu_dm_connector *aconnector,
7328                                      int connector_type,
7329                                      struct dc_link *link,
7330                                      int link_index)
7331 {
7332         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7333
7334         /*
7335          * Some of the properties below require access to state, like bpc.
7336          * Allocate some default initial connector state with our reset helper.
7337          */
7338         if (aconnector->base.funcs->reset)
7339                 aconnector->base.funcs->reset(&aconnector->base);
7340
7341         aconnector->connector_id = link_index;
7342         aconnector->dc_link = link;
7343         aconnector->base.interlace_allowed = false;
7344         aconnector->base.doublescan_allowed = false;
7345         aconnector->base.stereo_allowed = false;
7346         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7347         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7348         aconnector->audio_inst = -1;
7349         mutex_init(&aconnector->hpd_lock);
7350
7351         /*
         * Configure HPD hot-plug support: connector->polled defaults to 0,
         * which means HPD hot plug is not supported.
7354          */
7355         switch (connector_type) {
7356         case DRM_MODE_CONNECTOR_HDMIA:
7357                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7358                 aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.hdmi_ycbcr420_supported;
7360                 break;
7361         case DRM_MODE_CONNECTOR_DisplayPort:
7362                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7363                 aconnector->base.ycbcr_420_allowed =
                        link->link_enc->features.dp_ycbcr420_supported;
7365                 break;
7366         case DRM_MODE_CONNECTOR_DVID:
7367                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7368                 break;
7369         default:
7370                 break;
7371         }
7372
7373         drm_object_attach_property(&aconnector->base.base,
7374                                 dm->ddev->mode_config.scaling_mode_property,
7375                                 DRM_MODE_SCALE_NONE);
7376
7377         drm_object_attach_property(&aconnector->base.base,
7378                                 adev->mode_info.underscan_property,
7379                                 UNDERSCAN_OFF);
7380         drm_object_attach_property(&aconnector->base.base,
7381                                 adev->mode_info.underscan_hborder_property,
7382                                 0);
7383         drm_object_attach_property(&aconnector->base.base,
7384                                 adev->mode_info.underscan_vborder_property,
7385                                 0);
7386
7387         if (!aconnector->mst_port)
7388                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7389
        /* This defaults to the max in the range, but we want 8bpc for non-eDP. */
7391         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7392         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7393
7394         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7395             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7396                 drm_object_attach_property(&aconnector->base.base,
7397                                 adev->mode_info.abm_level_property, 0);
7398         }
7399
7400         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7401             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7402             connector_type == DRM_MODE_CONNECTOR_eDP) {
7403                 drm_object_attach_property(
7404                         &aconnector->base.base,
7405                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
7406
7407                 if (!aconnector->mst_port)
7408                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7409
7410 #ifdef CONFIG_DRM_AMD_DC_HDCP
7411                 if (adev->dm.hdcp_workqueue)
7412                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7413 #endif
7414         }
7415 }
7416
7417 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7418                               struct i2c_msg *msgs, int num)
7419 {
7420         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7421         struct ddc_service *ddc_service = i2c->ddc_service;
7422         struct i2c_command cmd;
7423         int i;
7424         int result = -EIO;
7425
7426         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7427
7428         if (!cmd.payloads)
7429                 return result;
7430
7431         cmd.number_of_payloads = num;
7432         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7433         cmd.speed = 100;
7434
7435         for (i = 0; i < num; i++) {
7436                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7437                 cmd.payloads[i].address = msgs[i].addr;
7438                 cmd.payloads[i].length = msgs[i].len;
7439                 cmd.payloads[i].data = msgs[i].buf;
7440         }
7441
7442         if (dc_submit_i2c(
7443                         ddc_service->ctx->dc,
7444                         ddc_service->ddc_pin->hw_info.ddc_channel,
7445                         &cmd))
7446                 result = num;
7447
7448         kfree(cmd.payloads);
7449         return result;
7450 }
7451
7452 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7453 {
7454         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7455 }
7456
7457 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7458         .master_xfer = amdgpu_dm_i2c_xfer,
7459         .functionality = amdgpu_dm_i2c_func,
7460 };
7461
7462 static struct amdgpu_i2c_adapter *
7463 create_i2c(struct ddc_service *ddc_service,
7464            int link_index,
7465            int *res)
7466 {
7467         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7468         struct amdgpu_i2c_adapter *i2c;
7469
7470         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7471         if (!i2c)
7472                 return NULL;
7473         i2c->base.owner = THIS_MODULE;
7474         i2c->base.class = I2C_CLASS_DDC;
7475         i2c->base.dev.parent = &adev->pdev->dev;
7476         i2c->base.algo = &amdgpu_dm_i2c_algo;
7477         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7478         i2c_set_adapdata(&i2c->base, i2c);
7479         i2c->ddc_service = ddc_service;
7480         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7481
7482         return i2c;
7483 }
7484
7485
7486 /*
7487  * Note: this function assumes that dc_link_detect() was called for the
7488  * dc_link which will be represented by this aconnector.
7489  */
7490 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7491                                     struct amdgpu_dm_connector *aconnector,
7492                                     uint32_t link_index,
7493                                     struct amdgpu_encoder *aencoder)
7494 {
7495         int res = 0;
7496         int connector_type;
7497         struct dc *dc = dm->dc;
7498         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7499         struct amdgpu_i2c_adapter *i2c;
7500
7501         link->priv = aconnector;
7502
7503         DRM_DEBUG_DRIVER("%s()\n", __func__);
7504
7505         i2c = create_i2c(link->ddc, link->link_index, &res);
7506         if (!i2c) {
7507                 DRM_ERROR("Failed to create i2c adapter data\n");
7508                 return -ENOMEM;
7509         }
7510
7511         aconnector->i2c = i2c;
7512         res = i2c_add_adapter(&i2c->base);
7513
7514         if (res) {
7515                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7516                 goto out_free;
7517         }
7518
7519         connector_type = to_drm_connector_type(link->connector_signal);
7520
7521         res = drm_connector_init_with_ddc(
7522                         dm->ddev,
7523                         &aconnector->base,
7524                         &amdgpu_dm_connector_funcs,
7525                         connector_type,
7526                         &i2c->base);
7527
7528         if (res) {
7529                 DRM_ERROR("connector_init failed\n");
7530                 aconnector->connector_id = -1;
7531                 goto out_free;
7532         }
7533
7534         drm_connector_helper_add(
7535                         &aconnector->base,
7536                         &amdgpu_dm_connector_helper_funcs);
7537
7538         amdgpu_dm_connector_init_helper(
7539                 dm,
7540                 aconnector,
7541                 connector_type,
7542                 link,
7543                 link_index);
7544
7545         drm_connector_attach_encoder(
7546                 &aconnector->base, &aencoder->base);
7547
7548         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7549                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7550                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7551
7552 out_free:
7553         if (res) {
7554                 kfree(i2c);
7555                 aconnector->i2c = NULL;
7556         }
7557         return res;
7558 }
7559
7560 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7561 {
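        /*
         * Equivalent to (1u << num_crtc) - 1 for 1-6 CRTCs; anything
         * larger is clamped to 0x3f (6 CRTCs), the hardware maximum here.
         */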
7562         switch (adev->mode_info.num_crtc) {
7563         case 1:
7564                 return 0x1;
7565         case 2:
7566                 return 0x3;
7567         case 3:
7568                 return 0x7;
7569         case 4:
7570                 return 0xf;
7571         case 5:
7572                 return 0x1f;
7573         case 6:
7574         default:
7575                 return 0x3f;
7576         }
7577 }
7578
7579 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7580                                   struct amdgpu_encoder *aencoder,
7581                                   uint32_t link_index)
7582 {
7583         struct amdgpu_device *adev = drm_to_adev(dev);
7584
7585         int res = drm_encoder_init(dev,
7586                                    &aencoder->base,
7587                                    &amdgpu_dm_encoder_funcs,
7588                                    DRM_MODE_ENCODER_TMDS,
7589                                    NULL);
7590
7591         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7592
7593         if (!res)
7594                 aencoder->encoder_id = link_index;
7595         else
7596                 aencoder->encoder_id = -1;
7597
7598         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7599
7600         return res;
7601 }
7602
7603 static void manage_dm_interrupts(struct amdgpu_device *adev,
7604                                  struct amdgpu_crtc *acrtc,
7605                                  bool enable)
7606 {
7607         /*
7608          * We have no guarantee that the frontend index maps to the same
7609          * backend index - some even map to more than one.
7610          *
7611          * TODO: Use a different interrupt or check DC itself for the mapping.
7612          */
7613         int irq_type =
7614                 amdgpu_display_crtc_idx_to_irq_type(
7615                         adev,
7616                         acrtc->crtc_id);
7617
7618         if (enable) {
7619                 drm_crtc_vblank_on(&acrtc->base);
7620                 amdgpu_irq_get(
7621                         adev,
7622                         &adev->pageflip_irq,
7623                         irq_type);
7624 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7625                 amdgpu_irq_get(
7626                         adev,
7627                         &adev->vline0_irq,
7628                         irq_type);
7629 #endif
7630         } else {
7631 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7632                 amdgpu_irq_put(
7633                         adev,
7634                         &adev->vline0_irq,
7635                         irq_type);
7636 #endif
7637                 amdgpu_irq_put(
7638                         adev,
7639                         &adev->pageflip_irq,
7640                         irq_type);
7641                 drm_crtc_vblank_off(&acrtc->base);
7642         }
7643 }
7644
7645 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7646                                       struct amdgpu_crtc *acrtc)
7647 {
7648         int irq_type =
7649                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7650
        /*
         * This reads the current state for the IRQ and forces the setting
         * to be reapplied to hardware.
7654          */
7655         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7656 }
7657
7658 static bool
7659 is_scaling_state_different(const struct dm_connector_state *dm_state,
7660                            const struct dm_connector_state *old_dm_state)
7661 {
7662         if (dm_state->scaling != old_dm_state->scaling)
7663                 return true;
7664         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7665                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7666                         return true;
7667         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7668                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7669                         return true;
7670         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7671                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7672                 return true;
7673         return false;
7674 }
7675
7676 #ifdef CONFIG_DRM_AMD_DC_HDCP
7677 static bool is_content_protection_different(struct drm_connector_state *state,
7678                                             const struct drm_connector_state *old_state,
7679                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7680 {
7681         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7682         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7683
7684         /* Handle: Type0/1 change */
7685         if (old_state->hdcp_content_type != state->hdcp_content_type &&
7686             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7687                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7688                 return true;
7689         }
7690
        /* CP is being re-enabled, ignore this
7692          *
7693          * Handles:     ENABLED -> DESIRED
7694          */
7695         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7696             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7697                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7698                 return false;
7699         }
7700
7701         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7702          *
7703          * Handles:     UNDESIRED -> ENABLED
7704          */
7705         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7706             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7707                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7708
        /* Check if something is connected or enabled; otherwise we would start
         * HDCP with nothing connected/enabled: hot-plug, headless S3, DPMS.
7711          *
7712          * Handles:     DESIRED -> DESIRED (Special case)
7713          */
7714         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7715             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7716                 dm_con_state->update_hdcp = false;
7717                 return true;
7718         }
7719
7720         /*
7721          * Handles:     UNDESIRED -> UNDESIRED
7722          *              DESIRED -> DESIRED
7723          *              ENABLED -> ENABLED
7724          */
7725         if (old_state->content_protection == state->content_protection)
7726                 return false;
7727
7728         /*
7729          * Handles:     UNDESIRED -> DESIRED
7730          *              DESIRED -> UNDESIRED
7731          *              ENABLED -> UNDESIRED
7732          */
7733         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7734                 return true;
7735
7736         /*
7737          * Handles:     DESIRED -> ENABLED
7738          */
7739         return false;
7740 }
7741
#endif

7743 static void remove_stream(struct amdgpu_device *adev,
7744                           struct amdgpu_crtc *acrtc,
7745                           struct dc_stream_state *stream)
7746 {
7747         /* this is the update mode case */
7748
7749         acrtc->otg_inst = -1;
7750         acrtc->enabled = false;
7751 }
7752
7753 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7754                                struct dc_cursor_position *position)
7755 {
7756         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7757         int x, y;
7758         int xorigin = 0, yorigin = 0;
7759
7760         if (!crtc || !plane->state->fb)
7761                 return 0;
7762
7763         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7764             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7765                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7766                           __func__,
7767                           plane->state->crtc_w,
7768                           plane->state->crtc_h);
7769                 return -EINVAL;
7770         }
7771
7772         x = plane->state->crtc_x;
7773         y = plane->state->crtc_y;
7774
7775         if (x <= -amdgpu_crtc->max_cursor_width ||
7776             y <= -amdgpu_crtc->max_cursor_height)
7777                 return 0;
7778
7779         if (x < 0) {
7780                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7781                 x = 0;
7782         }
7783         if (y < 0) {
7784                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7785                 y = 0;
7786         }
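        /*
         * For example (illustrative values): a cursor at crtc_x = -10 is
         * clamped to x = 0 with x_hotspot = 10, so the 10 off-screen pixels
         * are expressed through the hotspot rather than a negative position.
         */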
7787         position->enable = true;
7788         position->translate_by_source = true;
7789         position->x = x;
7790         position->y = y;
7791         position->x_hotspot = xorigin;
7792         position->y_hotspot = yorigin;
7793
7794         return 0;
7795 }
7796
7797 static void handle_cursor_update(struct drm_plane *plane,
7798                                  struct drm_plane_state *old_plane_state)
7799 {
7800         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7801         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7802         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7803         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7804         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7805         uint64_t address = afb ? afb->address : 0;
7806         struct dc_cursor_position position = {0};
7807         struct dc_cursor_attributes attributes;
7808         int ret;
7809
7810         if (!plane->state->fb && !old_plane_state->fb)
7811                 return;
7812
        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
7814                          __func__,
7815                          amdgpu_crtc->crtc_id,
7816                          plane->state->crtc_w,
7817                          plane->state->crtc_h);
7818
7819         ret = get_cursor_position(plane, crtc, &position);
7820         if (ret)
7821                 return;
7822
7823         if (!position.enable) {
7824                 /* turn off cursor */
7825                 if (crtc_state && crtc_state->stream) {
7826                         mutex_lock(&adev->dm.dc_lock);
7827                         dc_stream_set_cursor_position(crtc_state->stream,
7828                                                       &position);
7829                         mutex_unlock(&adev->dm.dc_lock);
7830                 }
7831                 return;
7832         }
7833
7834         amdgpu_crtc->cursor_width = plane->state->crtc_w;
7835         amdgpu_crtc->cursor_height = plane->state->crtc_h;
7836
7837         memset(&attributes, 0, sizeof(attributes));
7838         attributes.address.high_part = upper_32_bits(address);
7839         attributes.address.low_part  = lower_32_bits(address);
7840         attributes.width             = plane->state->crtc_w;
7841         attributes.height            = plane->state->crtc_h;
7842         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7843         attributes.rotation_angle    = 0;
7844         attributes.attribute_flags.value = 0;
7845
7846         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
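        /*
         * pitch is in pixels, not bytes: e.g. a 64-pixel-wide ARGB8888
         * cursor has pitches[0] = 256 bytes and cpp[0] = 4, giving a
         * pitch of 64.
         */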
7847
7848         if (crtc_state->stream) {
7849                 mutex_lock(&adev->dm.dc_lock);
7850                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7851                                                          &attributes))
7852                         DRM_ERROR("DC failed to set cursor attributes\n");
7853
7854                 if (!dc_stream_set_cursor_position(crtc_state->stream,
7855                                                    &position))
7856                         DRM_ERROR("DC failed to set cursor position\n");
7857                 mutex_unlock(&adev->dm.dc_lock);
7858         }
7859 }
7860
7861 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
7864         assert_spin_locked(&acrtc->base.dev->event_lock);
7865         WARN_ON(acrtc->event);
7866
7867         acrtc->event = acrtc->base.state->event;
7868
7869         /* Set the flip status */
7870         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7871
7872         /* Mark this event as consumed */
7873         acrtc->base.state->event = NULL;
7874
7875         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7876                                                  acrtc->crtc_id);
7877 }
7878
7879 static void update_freesync_state_on_stream(
7880         struct amdgpu_display_manager *dm,
7881         struct dm_crtc_state *new_crtc_state,
7882         struct dc_stream_state *new_stream,
7883         struct dc_plane_state *surface,
7884         u32 flip_timestamp_in_us)
7885 {
7886         struct mod_vrr_params vrr_params;
7887         struct dc_info_packet vrr_infopacket = {0};
7888         struct amdgpu_device *adev = dm->adev;
7889         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7890         unsigned long flags;
7891         bool pack_sdp_v1_3 = false;
7892
7893         if (!new_stream)
7894                 return;
7895
7896         /*
7897          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7898          * For now it's sufficient to just guard against these conditions.
7899          */
7900
7901         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7902                 return;
7903
7904         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7905         vrr_params = acrtc->dm_irq_params.vrr_params;
7906
7907         if (surface) {
7908                 mod_freesync_handle_preflip(
7909                         dm->freesync_module,
7910                         surface,
7911                         new_stream,
7912                         flip_timestamp_in_us,
7913                         &vrr_params);
7914
7915                 if (adev->family < AMDGPU_FAMILY_AI &&
7916                     amdgpu_dm_vrr_active(new_crtc_state)) {
7917                         mod_freesync_handle_v_update(dm->freesync_module,
7918                                                      new_stream, &vrr_params);
7919
7920                         /* Need to call this before the frame ends. */
7921                         dc_stream_adjust_vmin_vmax(dm->dc,
7922                                                    new_crtc_state->stream,
7923                                                    &vrr_params.adjust);
7924                 }
7925         }
7926
7927         mod_freesync_build_vrr_infopacket(
7928                 dm->freesync_module,
7929                 new_stream,
7930                 &vrr_params,
7931                 PACKET_TYPE_VRR,
7932                 TRANSFER_FUNC_UNKNOWN,
7933                 &vrr_infopacket,
7934                 pack_sdp_v1_3);
7935
7936         new_crtc_state->freesync_timing_changed |=
7937                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7938                         &vrr_params.adjust,
7939                         sizeof(vrr_params.adjust)) != 0);
7940
7941         new_crtc_state->freesync_vrr_info_changed |=
7942                 (memcmp(&new_crtc_state->vrr_infopacket,
7943                         &vrr_infopacket,
7944                         sizeof(vrr_infopacket)) != 0);
7945
7946         acrtc->dm_irq_params.vrr_params = vrr_params;
7947         new_crtc_state->vrr_infopacket = vrr_infopacket;
7948
7949         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7950         new_stream->vrr_infopacket = vrr_infopacket;
7951
7952         if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
7954                               new_crtc_state->base.crtc->base.id,
7955                               (int)new_crtc_state->base.vrr_enabled,
7956                               (int)vrr_params.state);
7957
7958         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7959 }
7960
7961 static void update_stream_irq_parameters(
7962         struct amdgpu_display_manager *dm,
7963         struct dm_crtc_state *new_crtc_state)
7964 {
7965         struct dc_stream_state *new_stream = new_crtc_state->stream;
7966         struct mod_vrr_params vrr_params;
7967         struct mod_freesync_config config = new_crtc_state->freesync_config;
7968         struct amdgpu_device *adev = dm->adev;
7969         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7970         unsigned long flags;
7971
7972         if (!new_stream)
7973                 return;
7974
7975         /*
7976          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7977          * For now it's sufficient to just guard against these conditions.
7978          */
7979         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7980                 return;
7981
7982         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7983         vrr_params = acrtc->dm_irq_params.vrr_params;
7984
7985         if (new_crtc_state->vrr_supported &&
7986             config.min_refresh_in_uhz &&
7987             config.max_refresh_in_uhz) {
7988                 /*
7989                  * if freesync compatible mode was set, config.state will be set
7990                  * in atomic check
7991                  */
7992                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7993                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7994                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7995                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7996                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7997                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7998                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7999                 } else {
8000                         config.state = new_crtc_state->base.vrr_enabled ?
8001                                                      VRR_STATE_ACTIVE_VARIABLE :
8002                                                      VRR_STATE_INACTIVE;
8003                 }
8004         } else {
8005                 config.state = VRR_STATE_UNSUPPORTED;
8006         }
8007
8008         mod_freesync_build_vrr_params(dm->freesync_module,
8009                                       new_stream,
8010                                       &config, &vrr_params);
8011
8012         new_crtc_state->freesync_timing_changed |=
8013                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8014                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8015
8016         new_crtc_state->freesync_config = config;
8017         /* Copy state for access from DM IRQ handler */
8018         acrtc->dm_irq_params.freesync_config = config;
8019         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8020         acrtc->dm_irq_params.vrr_params = vrr_params;
8021         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8022 }
8023
8024 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8025                                             struct dm_crtc_state *new_state)
8026 {
8027         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8028         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8029
8030         if (!old_vrr_active && new_vrr_active) {
8031                 /* Transition VRR inactive -> active:
8032                  * While VRR is active, we must not disable vblank irq, as a
8033                  * reenable after disable would compute bogus vblank/pflip
8034                  * timestamps if it likely happened inside display front-porch.
8035                  *
8036                  * We also need vupdate irq for the actual core vblank handling
8037                  * at end of vblank.
8038                  */
8039                 dm_set_vupdate_irq(new_state->base.crtc, true);
8040                 drm_crtc_vblank_get(new_state->base.crtc);
8041                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8042                                  __func__, new_state->base.crtc->base.id);
8043         } else if (old_vrr_active && !new_vrr_active) {
8044                 /* Transition VRR active -> inactive:
8045                  * Allow vblank irq disable again for fixed refresh rate.
8046                  */
8047                 dm_set_vupdate_irq(new_state->base.crtc, false);
8048                 drm_crtc_vblank_put(new_state->base.crtc);
8049                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8050                                  __func__, new_state->base.crtc->base.id);
8051         }
8052 }
8053
8054 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8055 {
8056         struct drm_plane *plane;
8057         struct drm_plane_state *old_plane_state, *new_plane_state;
8058         int i;
8059
8060         /*
8061          * TODO: Make this per-stream so we don't issue redundant updates for
8062          * commits with multiple streams.
8063          */
8064         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
8065                                        new_plane_state, i)
8066                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8067                         handle_cursor_update(plane, old_plane_state);
8068 }
8069
8070 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8071                                     struct dc_state *dc_state,
8072                                     struct drm_device *dev,
8073                                     struct amdgpu_display_manager *dm,
8074                                     struct drm_crtc *pcrtc,
8075                                     bool wait_for_vblank)
8076 {
8077         uint32_t i;
8078         uint64_t timestamp_ns;
8079         struct drm_plane *plane;
8080         struct drm_plane_state *old_plane_state, *new_plane_state;
8081         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8082         struct drm_crtc_state *new_pcrtc_state =
8083                         drm_atomic_get_new_crtc_state(state, pcrtc);
8084         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8085         struct dm_crtc_state *dm_old_crtc_state =
8086                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8087         int planes_count = 0, vpos, hpos;
8088         long r;
8089         unsigned long flags;
8090         struct amdgpu_bo *abo;
8091         uint32_t target_vblank, last_flip_vblank;
8092         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8093         bool pflip_present = false;
8094         struct {
8095                 struct dc_surface_update surface_updates[MAX_SURFACES];
8096                 struct dc_plane_info plane_infos[MAX_SURFACES];
8097                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8098                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8099                 struct dc_stream_update stream_update;
8100         } *bundle;
8101
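        /*
         * Note: the bundle holds four per-surface arrays sized MAX_SURFACES
         * plus a stream update, which together are too large to comfortably
         * live on the kernel stack, hence the heap allocation below.
         */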
8102         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8103
8104         if (!bundle) {
8105                 dm_error("Failed to allocate update bundle\n");
8106                 goto cleanup;
8107         }
8108
8109         /*
8110          * Disable the cursor first if we're disabling all the planes.
8111          * It'll remain on the screen after the planes are re-enabled
8112          * if we don't.
8113          */
8114         if (acrtc_state->active_planes == 0)
8115                 amdgpu_dm_commit_cursors(state);
8116
8117         /* update planes when needed */
8118         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8119                 struct drm_crtc *crtc = new_plane_state->crtc;
8120                 struct drm_crtc_state *new_crtc_state;
8121                 struct drm_framebuffer *fb = new_plane_state->fb;
8122                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8123                 bool plane_needs_flip;
8124                 struct dc_plane_state *dc_plane;
8125                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8126
8127                 /* Cursor plane is handled after stream updates */
8128                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8129                         continue;
8130
8131                 if (!fb || !crtc || pcrtc != crtc)
8132                         continue;
8133
8134                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8135                 if (!new_crtc_state->active)
8136                         continue;
8137
8138                 dc_plane = dm_new_plane_state->dc_state;
8139
8140                 bundle->surface_updates[planes_count].surface = dc_plane;
8141                 if (new_pcrtc_state->color_mgmt_changed) {
8142                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8143                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8144                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8145                 }
8146
8147                 fill_dc_scaling_info(new_plane_state,
8148                                      &bundle->scaling_infos[planes_count]);
8149
8150                 bundle->surface_updates[planes_count].scaling_info =
8151                         &bundle->scaling_infos[planes_count];
8152
8153                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8154
8155                 pflip_present = pflip_present || plane_needs_flip;
8156
8157                 if (!plane_needs_flip) {
8158                         planes_count += 1;
8159                         continue;
8160                 }
8161
8162                 abo = gem_to_amdgpu_bo(fb->obj[0]);
8163
8164                 /*
8165                  * Wait for all fences on this FB. Do limited wait to avoid
8166                  * deadlock during GPU reset when this fence will not signal
8167                  * but we hold reservation lock for the BO.
8168                  */
8169                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
8170                                                         false,
8171                                                         msecs_to_jiffies(5000));
8172                 if (unlikely(r <= 0))
8173                         DRM_ERROR("Waiting for fences timed out!");
8174
8175                 fill_dc_plane_info_and_addr(
8176                         dm->adev, new_plane_state,
8177                         afb->tiling_flags,
8178                         &bundle->plane_infos[planes_count],
8179                         &bundle->flip_addrs[planes_count].address,
8180                         afb->tmz_surface, false);
8181
8182                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
8183                                  new_plane_state->plane->index,
8184                                  bundle->plane_infos[planes_count].dcc.enable);
8185
8186                 bundle->surface_updates[planes_count].plane_info =
8187                         &bundle->plane_infos[planes_count];
8188
8189                 /*
8190                  * Only allow immediate flips for fast updates that don't
8191                  * change FB pitch, DCC state, rotation or mirroring.
8192                  */
8193                 bundle->flip_addrs[planes_count].flip_immediate =
8194                         crtc->state->async_flip &&
8195                         acrtc_state->update_type == UPDATE_TYPE_FAST;
8196
8197                 timestamp_ns = ktime_get_ns();
8198                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8199                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8200                 bundle->surface_updates[planes_count].surface = dc_plane;
8201
8202                 if (!bundle->surface_updates[planes_count].surface) {
8203                         DRM_ERROR("No surface for CRTC: id=%d\n",
8204                                         acrtc_attach->crtc_id);
8205                         continue;
8206                 }
8207
8208                 if (plane == pcrtc->primary)
8209                         update_freesync_state_on_stream(
8210                                 dm,
8211                                 acrtc_state,
8212                                 acrtc_state->stream,
8213                                 dc_plane,
8214                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8215
8216                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
8217                                  __func__,
8218                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8219                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8220
8221                 planes_count += 1;
8222
8223         }
8224
8225         if (pflip_present) {
8226                 if (!vrr_active) {
8227                         /* Use old throttling in non-vrr fixed refresh rate mode
8228                          * to keep flip scheduling based on target vblank counts
8229                          * working in a backwards compatible way, e.g., for
8230                          * clients using the GLX_OML_sync_control extension or
8231                          * DRI3/Present extension with defined target_msc.
8232                          */
8233                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8234                 } else {
8236                         /* For variable refresh rate mode only:
8237                          * Get vblank of last completed flip to avoid > 1 vrr
8238                          * flips per video frame by use of throttling, but allow
8239                          * flip programming anywhere in the possibly large
8240                          * variable vrr vblank interval for fine-grained flip
8241                          * timing control and more opportunity to avoid stutter
8242                          * on late submission of flips.
8243                          */
8244                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8245                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8246                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8247                 }
8248
8249                 target_vblank = last_flip_vblank + wait_for_vblank;
8250
8251                 /*
8252                  * Wait until we're out of the vertical blank period before the one
8253                  * targeted by the flip
8254                  */
8255                 while ((acrtc_attach->enabled &&
8256                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8257                                                             0, &vpos, &hpos, NULL,
8258                                                             NULL, &pcrtc->hwmode)
8259                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8260                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8261                         (int)(target_vblank -
8262                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8263                         usleep_range(1000, 1100);
8264                 }
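                /*
                 * Illustrative example (values assumed, not from the code):
                 * with wait_for_vblank true and last_flip_vblank == 1000,
                 * target_vblank is 1001, and the loop above polls in ~1 ms
                 * steps until the scanout has left the vblank preceding
                 * frame 1001, so the flip is programmed outside that vblank.
                 */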
8265
8266                 /**
8267                  * Prepare the flip event for the pageflip interrupt to handle.
8268                  *
8269                  * This only works in the case where we've already turned on the
8270                  * appropriate hardware blocks (e.g. HUBP), so in the transition case
8271                  * from 0 -> n planes we have to skip a hardware generated event
8272                  * and rely on sending it from software.
8273                  */
8274                 if (acrtc_attach->base.state->event &&
8275                     acrtc_state->active_planes > 0) {
8276                         drm_crtc_vblank_get(pcrtc);
8277
8278                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8279
8280                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8281                         prepare_flip_isr(acrtc_attach);
8282
8283                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8284                 }
8285
8286                 if (acrtc_state->stream) {
8287                         if (acrtc_state->freesync_vrr_info_changed)
8288                                 bundle->stream_update.vrr_infopacket =
8289                                         &acrtc_state->stream->vrr_infopacket;
8290                 }
8291         }
8292
8293         /* Update the planes if changed or disable if we don't have any. */
8294         if ((planes_count || acrtc_state->active_planes == 0) &&
8295                 acrtc_state->stream) {
8296                 bundle->stream_update.stream = acrtc_state->stream;
8297                 if (new_pcrtc_state->mode_changed) {
8298                         bundle->stream_update.src = acrtc_state->stream->src;
8299                         bundle->stream_update.dst = acrtc_state->stream->dst;
8300                 }
8301
8302                 if (new_pcrtc_state->color_mgmt_changed) {
8303                         /*
8304                          * TODO: This isn't fully correct since we've actually
8305                          * already modified the stream in place.
8306                          */
8307                         bundle->stream_update.gamut_remap =
8308                                 &acrtc_state->stream->gamut_remap_matrix;
8309                         bundle->stream_update.output_csc_transform =
8310                                 &acrtc_state->stream->csc_color_matrix;
8311                         bundle->stream_update.out_transfer_func =
8312                                 acrtc_state->stream->out_transfer_func;
8313                 }
8314
8315                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8316                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8317                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8318
8319                 /*
8320                  * If FreeSync state on the stream has changed then we need to
8321                  * re-adjust the min/max bounds now that DC doesn't handle this
8322                  * as part of commit.
8323                  */
8324                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8325                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8326                         dc_stream_adjust_vmin_vmax(
8327                                 dm->dc, acrtc_state->stream,
8328                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8329                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8330                 }
8331                 mutex_lock(&dm->dc_lock);
8332                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8333                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8334                         amdgpu_dm_psr_disable(acrtc_state->stream);
8335
8336                 dc_commit_updates_for_stream(dm->dc,
8337                                                      bundle->surface_updates,
8338                                                      planes_count,
8339                                                      acrtc_state->stream,
8340                                                      &bundle->stream_update,
8341                                                      dc_state);
8342
8343                 /**
8344                  * Enable or disable the interrupts on the backend.
8345                  *
8346                  * Most pipes are put into power gating when unused.
8347                  *
8348                  * When power gating is enabled on a pipe we lose the
8349                  * interrupt enablement state when power gating is disabled.
8350                  *
8351                  * So we need to update the IRQ control state in hardware
8352                  * whenever the pipe turns on (since it could be previously
8353                  * power gated) or off (since some pipes can't be power gated
8354                  * on some ASICs).
8355                  */
8356                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8357                         dm_update_pflip_irq_state(drm_to_adev(dev),
8358                                                   acrtc_attach);
8359
8360                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8361                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8362                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8363                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8364                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8365                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8366                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8367                         amdgpu_dm_psr_enable(acrtc_state->stream);
8368                 }
8369
8370                 mutex_unlock(&dm->dc_lock);
8371         }
8372
8373         /*
8374          * Update cursor state *after* programming all the planes.
8375          * This avoids redundant programming in the case where we're going
8376          * to be disabling a single plane - those pipes are being disabled.
8377          */
8378         if (acrtc_state->active_planes)
8379                 amdgpu_dm_commit_cursors(state);
8380
8381 cleanup:
8382         kfree(bundle);
8383 }
8384
8385 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8386                                    struct drm_atomic_state *state)
8387 {
8388         struct amdgpu_device *adev = drm_to_adev(dev);
8389         struct amdgpu_dm_connector *aconnector;
8390         struct drm_connector *connector;
8391         struct drm_connector_state *old_con_state, *new_con_state;
8392         struct drm_crtc_state *new_crtc_state;
8393         struct dm_crtc_state *new_dm_crtc_state;
8394         const struct dc_stream_status *status;
8395         int i, inst;
8396
8397         /* Notify audio device removals. */
8398         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8399                 if (old_con_state->crtc != new_con_state->crtc) {
8400                         /* CRTC changes require notification. */
8401                         goto notify;
8402                 }
8403
8404                 if (!new_con_state->crtc)
8405                         continue;
8406
8407                 new_crtc_state = drm_atomic_get_new_crtc_state(
8408                         state, new_con_state->crtc);
8409
8410                 if (!new_crtc_state)
8411                         continue;
8412
8413                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8414                         continue;
8415
8416         notify:
8417                 aconnector = to_amdgpu_dm_connector(connector);
8418
8419                 mutex_lock(&adev->dm.audio_lock);
8420                 inst = aconnector->audio_inst;
8421                 aconnector->audio_inst = -1;
8422                 mutex_unlock(&adev->dm.audio_lock);
8423
8424                 amdgpu_dm_audio_eld_notify(adev, inst);
8425         }
8426
8427         /* Notify audio device additions. */
8428         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8429                 if (!new_con_state->crtc)
8430                         continue;
8431
8432                 new_crtc_state = drm_atomic_get_new_crtc_state(
8433                         state, new_con_state->crtc);
8434
8435                 if (!new_crtc_state)
8436                         continue;
8437
8438                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8439                         continue;
8440
8441                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8442                 if (!new_dm_crtc_state->stream)
8443                         continue;
8444
8445                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8446                 if (!status)
8447                         continue;
8448
8449                 aconnector = to_amdgpu_dm_connector(connector);
8450
8451                 mutex_lock(&adev->dm.audio_lock);
8452                 inst = status->audio_inst;
8453                 aconnector->audio_inst = inst;
8454                 mutex_unlock(&adev->dm.audio_lock);
8455
8456                 amdgpu_dm_audio_eld_notify(adev, inst);
8457         }
8458 }
8459
8460 /*
8461  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8462  * @crtc_state: the DRM CRTC state
8463  * @stream_state: the DC stream state.
8464  *
8465  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8466  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8467  */
8468 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8469                                                 struct dc_stream_state *stream_state)
8470 {
8471         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8472 }
8473
8474 /**
8475  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8476  * @state: The atomic state to commit
8477  *
8478  * This will tell DC to commit the constructed DC state from atomic_check,
8479  * programming the hardware. Any failure here implies a hardware failure, since
8480  * atomic check should have filtered anything non-kosher.
8481  */
8482 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8483 {
8484         struct drm_device *dev = state->dev;
8485         struct amdgpu_device *adev = drm_to_adev(dev);
8486         struct amdgpu_display_manager *dm = &adev->dm;
8487         struct dm_atomic_state *dm_state;
8488         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8489         uint32_t i, j;
8490         struct drm_crtc *crtc;
8491         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8492         unsigned long flags;
8493         bool wait_for_vblank = true;
8494         struct drm_connector *connector;
8495         struct drm_connector_state *old_con_state, *new_con_state;
8496         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8497         int crtc_disable_count = 0;
8498         bool mode_set_reset_required = false;
8499
8500         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8501
8502         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8503
8504         dm_state = dm_atomic_get_new_state(state);
8505         if (dm_state && dm_state->context) {
8506                 dc_state = dm_state->context;
8507         } else {
8508                 /* No state changes, retain current state. */
8509                 dc_state_temp = dc_create_state(dm->dc);
8510                 ASSERT(dc_state_temp);
8511                 dc_state = dc_state_temp;
8512                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8513         }
8514
8515         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8516                                        new_crtc_state, i) {
8517                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8518
8519                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8520
8521                 if (old_crtc_state->active &&
8522                     (!new_crtc_state->active ||
8523                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8524                         manage_dm_interrupts(adev, acrtc, false);
8525                         dc_stream_release(dm_old_crtc_state->stream);
8526                 }
8527         }
8528
8529         drm_atomic_helper_calc_timestamping_constants(state);
8530
8531         /* update changed items */
8532         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8533                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8534
8535                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8536                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8537
8538                 DRM_DEBUG_DRIVER(
8539                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8540                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
8541                         "connectors_changed:%d\n",
8542                         acrtc->crtc_id,
8543                         new_crtc_state->enable,
8544                         new_crtc_state->active,
8545                         new_crtc_state->planes_changed,
8546                         new_crtc_state->mode_changed,
8547                         new_crtc_state->active_changed,
8548                         new_crtc_state->connectors_changed);
8549
8550                 /* Disable cursor if disabling crtc */
8551                 if (old_crtc_state->active && !new_crtc_state->active) {
8552                         struct dc_cursor_position position;
8553
8554                         memset(&position, 0, sizeof(position));
8555                         mutex_lock(&dm->dc_lock);
8556                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8557                         mutex_unlock(&dm->dc_lock);
8558                 }
8559
8560                 /* Copy all transient state flags into dc state */
8561                 if (dm_new_crtc_state->stream) {
8562                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8563                                                             dm_new_crtc_state->stream);
8564                 }
8565
8566                 /* handles headless hotplug case, updating new_state and
8567                  * aconnector as needed
8568                  */
8569
8570                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8571
8572                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8573
8574                         if (!dm_new_crtc_state->stream) {
8575                                 /*
8576                                  * This can happen because of issues with the
8577                                  * delivery of userspace notifications: userspace
8578                                  * tries to set a mode on a display that is in
8579                                  * fact disconnected, so dc_sink is NULL on the
8580                                  * aconnector. We expect a mode reset to follow.
8581                                  *
8582                                  * It can also happen when an unplug occurs
8583                                  * during the resume sequence.
8584                                  *
8585                                  * In either case, pretend we still have a sink
8586                                  * to keep the pipe running so that the hw state
8587                                  * stays consistent with the sw state.
8588                                  */
8590                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8591                                                 __func__, acrtc->base.base.id);
8592                                 continue;
8593                         }
8594
8595                         if (dm_old_crtc_state->stream)
8596                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8597
8598                         pm_runtime_get_noresume(dev->dev);
8599
8600                         acrtc->enabled = true;
8601                         acrtc->hw_mode = new_crtc_state->mode;
8602                         crtc->hwmode = new_crtc_state->mode;
8603                         mode_set_reset_required = true;
8604                 } else if (modereset_required(new_crtc_state)) {
8605                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8606                         /* i.e. reset mode */
8607                         if (dm_old_crtc_state->stream)
8608                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8609
8610                         mode_set_reset_required = true;
8611                 }
8612         } /* for_each_crtc_in_state() */
8613
8614         if (dc_state) {
8615                 /* if there is a mode set or reset, disable eDP PSR */
8616                 if (mode_set_reset_required)
8617                         amdgpu_dm_psr_disable_all(dm);
8618
8619                 dm_enable_per_frame_crtc_master_sync(dc_state);
8620                 mutex_lock(&dm->dc_lock);
8621                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8622                 mutex_unlock(&dm->dc_lock);
8623         }
8624
8625         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8626                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8627
8628                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8629
8630                 if (dm_new_crtc_state->stream != NULL) {
8631                         const struct dc_stream_status *status =
8632                                         dc_stream_get_status(dm_new_crtc_state->stream);
8633
8634                         if (!status)
8635                                 status = dc_stream_get_status_from_state(dc_state,
8636                                                                          dm_new_crtc_state->stream);
8637                         if (!status)
8638                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8639                         else
8640                                 acrtc->otg_inst = status->primary_otg_inst;
8641                 }
8642         }
8643 #ifdef CONFIG_DRM_AMD_DC_HDCP
8644         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8645                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8646                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8647                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8648
8649                 new_crtc_state = NULL;
8650
8651                 if (acrtc)
8652                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8653
8654                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8655
8656                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8657                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8658                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8659                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8660                         dm_new_con_state->update_hdcp = true;
8661                         continue;
8662                 }
8663
8664                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8665                         hdcp_update_display(
8666                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8667                                 new_con_state->hdcp_content_type,
8668                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
8669         }
8670 #endif
8671
8672         /* Handle connector state changes */
8673         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8674                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8675                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8676                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8677                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8678                 struct dc_stream_update stream_update;
8679                 struct dc_info_packet hdr_packet;
8680                 struct dc_stream_status *status = NULL;
8681                 bool abm_changed, hdr_changed, scaling_changed;
8682
8683                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8684                 memset(&stream_update, 0, sizeof(stream_update));
8685
8686                 if (acrtc) {
8687                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8688                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8689                 }
8690
8691                 /* Skip any modesets/resets */
8692                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8693                         continue;
8694
8695                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8696                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8697
8698                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8699                                                              dm_old_con_state);
8700
8701                 abm_changed = dm_new_crtc_state->abm_level !=
8702                               dm_old_crtc_state->abm_level;
8703
8704                 hdr_changed =
8705                         is_hdr_metadata_different(old_con_state, new_con_state);
8706
8707                 if (!scaling_changed && !abm_changed && !hdr_changed)
8708                         continue;
8709
8710                 stream_update.stream = dm_new_crtc_state->stream;
8711                 if (scaling_changed) {
8712                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8713                                         dm_new_con_state, dm_new_crtc_state->stream);
8714
8715                         stream_update.src = dm_new_crtc_state->stream->src;
8716                         stream_update.dst = dm_new_crtc_state->stream->dst;
8717                 }
8718
8719                 if (abm_changed) {
8720                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8721
8722                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8723                 }
8724
8725                 if (hdr_changed) {
8726                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8727                         stream_update.hdr_static_metadata = &hdr_packet;
8728                 }
8729
8730                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8731                 if (WARN_ON(!status))
8732                         continue;
8732                 WARN_ON(!status->plane_count);
8733
8734                 /*
8735                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8736                  * Here we create an empty update on each plane.
8737                  * To fix this, DC should permit updating only stream properties.
8738                  */
8739                 for (j = 0; j < status->plane_count; j++)
8740                         dummy_updates[j].surface = status->plane_states[0];
8741
8742
8743                 mutex_lock(&dm->dc_lock);
8744                 dc_commit_updates_for_stream(dm->dc,
8745                                                      dummy_updates,
8746                                                      status->plane_count,
8747                                                      dm_new_crtc_state->stream,
8748                                                      &stream_update,
8749                                                      dc_state);
8750                 mutex_unlock(&dm->dc_lock);
8751         }
8752
8753         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8754         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8755                                       new_crtc_state, i) {
8756                 if (old_crtc_state->active && !new_crtc_state->active)
8757                         crtc_disable_count++;
8758
8759                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8760                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8761
8762                 /* For freesync config update on crtc state and params for irq */
8763                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8764
8765                 /* Handle vrr on->off / off->on transitions */
8766                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8767                                                 dm_new_crtc_state);
8768         }
8769
8770         /**
8771          * Enable interrupts for CRTCs that are newly enabled or went through
8772          * a modeset. It was intentionally deferred until after the front end
8773          * state was modified to wait until the OTG was on and so the IRQ
8774          * handlers didn't access stale or invalid state.
8775          */
8776         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8777                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8778 #ifdef CONFIG_DEBUG_FS
8779                 bool configure_crc = false;
8780                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8781 #endif
8782                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8783
8784                 if (new_crtc_state->active &&
8785                     (!old_crtc_state->active ||
8786                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8787                         dc_stream_retain(dm_new_crtc_state->stream);
8788                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8789                         manage_dm_interrupts(adev, acrtc, true);
8790
8791 #ifdef CONFIG_DEBUG_FS
8792                         /**
8793                          * Frontend may have changed so reapply the CRC capture
8794                          * settings for the stream.
8795                          */
8796                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8797                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8798                         cur_crc_src = acrtc->dm_irq_params.crc_src;
8799                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8800
8801                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8802                                 configure_crc = true;
8803 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8804                                 if (amdgpu_dm_crc_window_is_activated(crtc))
8805                                         configure_crc = false;
8806 #endif
8807                         }
8808
8809                         if (configure_crc)
8810                                 amdgpu_dm_crtc_configure_crc_source(
8811                                         crtc, dm_new_crtc_state, cur_crc_src);
8812 #endif
8813                 }
8814         }
8815
8816         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8817                 if (new_crtc_state->async_flip)
8818                         wait_for_vblank = false;
8819
8820         /* update planes when needed per crtc*/
8821         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8822                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8823
8824                 if (dm_new_crtc_state->stream)
8825                         amdgpu_dm_commit_planes(state, dc_state, dev,
8826                                                 dm, crtc, wait_for_vblank);
8827         }
8828
8829         /* Update audio instances for each connector. */
8830         amdgpu_dm_commit_audio(dev, state);
8831
8832         /*
8833          * send vblank event on all events not handled in flip and
8834          * mark consumed event for drm_atomic_helper_commit_hw_done
8835          */
8836         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8837         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8838
8839                 if (new_crtc_state->event)
8840                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8841
8842                 new_crtc_state->event = NULL;
8843         }
8844         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8845
8846         /* Signal HW programming completion */
8847         drm_atomic_helper_commit_hw_done(state);
8848
8849         if (wait_for_vblank)
8850                 drm_atomic_helper_wait_for_flip_done(dev, state);
8851
8852         drm_atomic_helper_cleanup_planes(dev, state);
8853
8854         /* return the stolen vga memory back to VRAM */
8855         if (!adev->mman.keep_stolen_vga_memory)
8856                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8857         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8858
8859         /*
8860          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8861          * so we can put the GPU into runtime suspend if we're not driving any
8862          * displays anymore
8863          */
8864         for (i = 0; i < crtc_disable_count; i++)
8865                 pm_runtime_put_autosuspend(dev->dev);
8866         pm_runtime_mark_last_busy(dev->dev);
8867
8868         if (dc_state_temp)
8869                 dc_release_state(dc_state_temp);
8870 }
8871
8872
8873 static int dm_force_atomic_commit(struct drm_connector *connector)
8874 {
8875         int ret = 0;
8876         struct drm_device *ddev = connector->dev;
8877         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8878         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8879         struct drm_plane *plane = disconnected_acrtc->base.primary;
8880         struct drm_connector_state *conn_state;
8881         struct drm_crtc_state *crtc_state;
8882         struct drm_plane_state *plane_state;
8883
8884         if (!state)
8885                 return -ENOMEM;
8886
8887         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8888
8889         /* Construct an atomic state to restore previous display setting */
8890
8891         /*
8892          * Attach connectors to drm_atomic_state
8893          */
8894         conn_state = drm_atomic_get_connector_state(state, connector);
8895
8896         ret = PTR_ERR_OR_ZERO(conn_state);
8897         if (ret)
8898                 goto out;
8899
8900         /* Attach crtc to drm_atomic_state*/
8901         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8902
8903         ret = PTR_ERR_OR_ZERO(crtc_state);
8904         if (ret)
8905                 goto out;
8906
8907         /* force a restore */
8908         crtc_state->mode_changed = true;
8909
8910         /* Attach plane to drm_atomic_state */
8911         plane_state = drm_atomic_get_plane_state(state, plane);
8912
8913         ret = PTR_ERR_OR_ZERO(plane_state);
8914         if (ret)
8915                 goto out;
8916
8917         /* Call commit internally with the state we just constructed */
8918         ret = drm_atomic_commit(state);
8919
8920 out:
8921         drm_atomic_state_put(state);
8922         if (ret)
8923                 DRM_ERROR("Restoring old state failed with %i\n", ret);
8924
8925         return ret;
8926 }
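/*
 * Note: the state constructed above touches the connector, its CRTC and the
 * primary plane, and forces crtc_state->mode_changed, so the
 * drm_atomic_commit() call re-runs a full modeset for that pipe even though
 * the mode itself is unchanged.
 */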
8927
8928 /*
8929  * This function handles all cases where a mode set does not come upon hotplug:
8930  * when a display is unplugged and then plugged back into the same port, and
8931  * when running without usermode desktop manager support.
8932  */
8933 void dm_restore_drm_connector_state(struct drm_device *dev,
8934                                     struct drm_connector *connector)
8935 {
8936         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8937         struct amdgpu_crtc *disconnected_acrtc;
8938         struct dm_crtc_state *acrtc_state;
8939
8940         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8941                 return;
8942
8943         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8944         if (!disconnected_acrtc)
8945                 return;
8946
8947         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8948         if (!acrtc_state->stream)
8949                 return;
8950
8951         /*
8952          * If the previous sink has not been released and differs from the
8953          * current one, we deduce that we cannot rely on a usermode call to
8954          * turn the display back on, so we do it here.
8955          */
8956         if (acrtc_state->stream->sink != aconnector->dc_sink)
8957                 dm_force_atomic_commit(&aconnector->base);
8958 }
8959
8960 /*
8961  * Grabs all modesetting locks to serialize against any blocking commits and
8962  * waits for completion of all non-blocking commits.
8963  */
8964 static int do_aquire_global_lock(struct drm_device *dev,
8965                                  struct drm_atomic_state *state)
8966 {
8967         struct drm_crtc *crtc;
8968         struct drm_crtc_commit *commit;
8969         long ret;
8970
8971         /*
8972          * Adding all modeset locks to acquire_ctx ensures that when the
8973          * framework releases it, the extra locks we are taking here will
8974          * get released too.
8975          */
8976         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8977         if (ret)
8978                 return ret;
8979
8980         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8981                 spin_lock(&crtc->commit_lock);
8982                 commit = list_first_entry_or_null(&crtc->commit_list,
8983                                 struct drm_crtc_commit, commit_entry);
8984                 if (commit)
8985                         drm_crtc_commit_get(commit);
8986                 spin_unlock(&crtc->commit_lock);
8987
8988                 if (!commit)
8989                         continue;
8990
8991                 /*
8992                  * Make sure all pending HW programming completed and
8993                  * page flips done
8994                  */
8995                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8996
8997                 if (ret > 0)
8998                         ret = wait_for_completion_interruptible_timeout(
8999                                         &commit->flip_done, 10*HZ);
9000
9001                 if (ret == 0)
9002                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9003                                   crtc->base.id, crtc->name);
9004
9005                 drm_crtc_commit_put(commit);
9006         }
9007
9008         return ret < 0 ? ret : 0;
9009 }
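/*
 * Note on the waits above: wait_for_completion_interruptible_timeout()
 * returns the remaining jiffies (> 0) on completion, 0 on timeout and
 * -ERESTARTSYS when interrupted by a signal, so a timeout is only logged
 * while a signal propagates an error through the final return.
 */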
9010
9011 static void get_freesync_config_for_crtc(
9012         struct dm_crtc_state *new_crtc_state,
9013         struct dm_connector_state *new_con_state)
9014 {
9015         struct mod_freesync_config config = {0};
9016         struct amdgpu_dm_connector *aconnector =
9017                         to_amdgpu_dm_connector(new_con_state->base.connector);
9018         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9019         int vrefresh = drm_mode_vrefresh(mode);
9020         bool fs_vid_mode = false;
9021
9022         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9023                                         vrefresh >= aconnector->min_vfreq &&
9024                                         vrefresh <= aconnector->max_vfreq;
9025
9026         if (new_crtc_state->vrr_supported) {
9027                 new_crtc_state->stream->ignore_msa_timing_param = true;
9028                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9029
9030                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9031                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9032                 config.vsif_supported = true;
9033                 config.btr = true;
9034
9035                 if (fs_vid_mode) {
9036                         config.state = VRR_STATE_ACTIVE_FIXED;
9037                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9038                         goto out;
9039                 } else if (new_crtc_state->base.vrr_enabled) {
9040                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9041                 } else {
9042                         config.state = VRR_STATE_INACTIVE;
9043                 }
9044         }
9045 out:
9046         new_crtc_state->freesync_config = config;
9047 }
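/*
 * Illustrative example (panel values assumed): a connector reporting
 * min_vfreq = 48 and max_vfreq = 144 yields min_refresh_in_uhz = 48,000,000
 * and max_refresh_in_uhz = 144,000,000, i.e. the VRR bounds expressed in
 * micro-Hz as the freesync module expects.
 */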
9048
9049 static void reset_freesync_config_for_crtc(
9050         struct dm_crtc_state *new_crtc_state)
9051 {
9052         new_crtc_state->vrr_supported = false;
9053
9054         memset(&new_crtc_state->vrr_infopacket, 0,
9055                sizeof(new_crtc_state->vrr_infopacket));
9056 }
9057
9058 static bool
9059 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9060                                  struct drm_crtc_state *new_crtc_state)
9061 {
9062         struct drm_display_mode old_mode, new_mode;
9063
9064         if (!old_crtc_state || !new_crtc_state)
9065                 return false;
9066
9067         old_mode = old_crtc_state->mode;
9068         new_mode = new_crtc_state->mode;
9069
9070         if (old_mode.clock       == new_mode.clock &&
9071             old_mode.hdisplay    == new_mode.hdisplay &&
9072             old_mode.vdisplay    == new_mode.vdisplay &&
9073             old_mode.htotal      == new_mode.htotal &&
9074             old_mode.vtotal      != new_mode.vtotal &&
9075             old_mode.hsync_start == new_mode.hsync_start &&
9076             old_mode.vsync_start != new_mode.vsync_start &&
9077             old_mode.hsync_end   == new_mode.hsync_end &&
9078             old_mode.vsync_end   != new_mode.vsync_end &&
9079             old_mode.hskew       == new_mode.hskew &&
9080             old_mode.vscan       == new_mode.vscan &&
9081             (old_mode.vsync_end - old_mode.vsync_start) ==
9082             (new_mode.vsync_end - new_mode.vsync_start))
9083                 return true;
9084
9085         return false;
9086 }
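/*
 * Illustrative example (mode values assumed): a 1920x1080 mode with
 * clock 148500, htotal 2200 and vtotal 1125 refreshes at 60 Hz; a variant
 * that only stretches the vertical front porch to vtotal 1406 runs at
 * ~48 Hz with identical horizontal timing and vsync width, and is reported
 * as unchanged for freesync purposes by the helper above.
 */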
9087
9088 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
9089         uint64_t num, den, res;
9090         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9091
9092         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9093
9094         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9095         den = (unsigned long long)new_crtc_state->mode.htotal *
9096               (unsigned long long)new_crtc_state->mode.vtotal;
9097
9098         res = div_u64(num, den);
9099         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9100 }
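/*
 * Worked example (mode values assumed): clock = 148500 (kHz), htotal = 2200
 * and vtotal = 1125 give num = 1.485e14 and den = 2,475,000, so
 * fixed_refresh_in_uhz = 60,000,000, i.e. a fixed 60 Hz refresh.
 */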
9101
9102 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9103                                 struct drm_atomic_state *state,
9104                                 struct drm_crtc *crtc,
9105                                 struct drm_crtc_state *old_crtc_state,
9106                                 struct drm_crtc_state *new_crtc_state,
9107                                 bool enable,
9108                                 bool *lock_and_validation_needed)
9109 {
9110         struct dm_atomic_state *dm_state = NULL;
9111         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9112         struct dc_stream_state *new_stream;
9113         int ret = 0;
9114
9115         /*
9116          * TODO: Move this code into dm_crtc_atomic_check once we get rid
9117          * of dc_validation_set; update changed items.
9118          */
9119         struct amdgpu_crtc *acrtc = NULL;
9120         struct amdgpu_dm_connector *aconnector = NULL;
9121         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9122         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9123
9124         new_stream = NULL;
9125
9126         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9127         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9128         acrtc = to_amdgpu_crtc(crtc);
9129         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9130
9131         /* TODO This hack should go away */
9132         if (aconnector && enable) {
9133                 /* Make sure fake sink is created in plug-in scenario */
9134                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9135                                                             &aconnector->base);
9136                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9137                                                             &aconnector->base);
9138
9139                 if (IS_ERR(drm_new_conn_state)) {
9140                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9141                         goto fail;
9142                 }
9143
9144                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9145                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9146
9147                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9148                         goto skip_modeset;
9149
9150                 new_stream = create_validate_stream_for_sink(aconnector,
9151                                                              &new_crtc_state->mode,
9152                                                              dm_new_conn_state,
9153                                                              dm_old_crtc_state->stream);
9154
9155                 /*
9156                  * We can have no stream on ACTION_SET if a display
9157                  * was disconnected during S3; in this case it is not an
9158                  * error, the OS will be updated after detection and
9159                  * will do the right thing on the next atomic commit.
9160                  */
9161
9162                 if (!new_stream) {
9163                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9164                                         __func__, acrtc->base.base.id);
9165                         ret = -ENOMEM;
9166                         goto fail;
9167                 }
9168
9169                 /*
9170                  * TODO: Check VSDB bits to decide whether this should
9171                  * be enabled or not.
9172                  */
9173                 new_stream->triggered_crtc_reset.enabled =
9174                         dm->force_timing_sync;
9175
9176                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9177
9178                 ret = fill_hdr_info_packet(drm_new_conn_state,
9179                                            &new_stream->hdr_static_metadata);
9180                 if (ret)
9181                         goto fail;
9182
9183                 /*
9184                  * If we already removed the old stream from the context
9185                  * (and set the new stream to NULL) then we can't reuse
9186                  * the old stream even if the stream and scaling are unchanged.
9187                  * We'll hit the BUG_ON() and get a black screen.
9188                  *
9189                  * TODO: Refactor this function to allow this check to work
9190                  * in all conditions.
9191                  */
9192                 if (amdgpu_freesync_vid_mode &&
9193                     dm_new_crtc_state->stream &&
9194                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9195                         goto skip_modeset;
9196
9197                 if (dm_new_crtc_state->stream &&
9198                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9199                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9200                         new_crtc_state->mode_changed = false;
9201                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9202                                          new_crtc_state->mode_changed);
9203                 }
9204         }
9205
9206         /* mode_changed flag may get updated above, need to check again */
9207         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9208                 goto skip_modeset;
9209
9210         DRM_DEBUG_DRIVER(
9211                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9212                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9213                 "connectors_changed:%d\n",
9214                 acrtc->crtc_id,
9215                 new_crtc_state->enable,
9216                 new_crtc_state->active,
9217                 new_crtc_state->planes_changed,
9218                 new_crtc_state->mode_changed,
9219                 new_crtc_state->active_changed,
9220                 new_crtc_state->connectors_changed);
9221
9222         /* Remove stream for any changed/disabled CRTC */
9223         if (!enable) {
9224
9225                 if (!dm_old_crtc_state->stream)
9226                         goto skip_modeset;
9227
9228                 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
9229                     is_timing_unchanged_for_freesync(new_crtc_state,
9230                                                      old_crtc_state)) {
9231                         new_crtc_state->mode_changed = false;
9232                         DRM_DEBUG_DRIVER(
9233                                 "Mode change not required for front porch change, setting mode_changed to %d\n",
9235                                 new_crtc_state->mode_changed);
9236
9237                         set_freesync_fixed_config(dm_new_crtc_state);
9238
9239                         goto skip_modeset;
9240                 } else if (amdgpu_freesync_vid_mode && aconnector &&
9241                            is_freesync_video_mode(&new_crtc_state->mode,
9242                                                   aconnector)) {
9243                         set_freesync_fixed_config(dm_new_crtc_state);
9244                 }
9245
9246                 ret = dm_atomic_get_state(state, &dm_state);
9247                 if (ret)
9248                         goto fail;
9249
9250                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9251                                 crtc->base.id);
9252
9253                 /* i.e. reset mode */
9254                 if (dc_remove_stream_from_ctx(
9255                                 dm->dc,
9256                                 dm_state->context,
9257                                 dm_old_crtc_state->stream) != DC_OK) {
9258                         ret = -EINVAL;
9259                         goto fail;
9260                 }
9261
9262                 dc_stream_release(dm_old_crtc_state->stream);
9263                 dm_new_crtc_state->stream = NULL;
9264
9265                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9266
9267                 *lock_and_validation_needed = true;
9268
9269         } else { /* Add stream for any updated/enabled CRTC */
9270                 /*
9271                  * Quick fix to prevent a NULL pointer dereference on new_stream
9272                  * when added MST connectors are not found in the existing crtc_state in chained mode.
9273                  * TODO: need to dig out the root cause of this.
9274                  */
9275                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
9276                         goto skip_modeset;
9277
9278                 if (modereset_required(new_crtc_state))
9279                         goto skip_modeset;
9280
9281                 if (modeset_required(new_crtc_state, new_stream,
9282                                      dm_old_crtc_state->stream)) {
9283
9284                         WARN_ON(dm_new_crtc_state->stream);
9285
9286                         ret = dm_atomic_get_state(state, &dm_state);
9287                         if (ret)
9288                                 goto fail;
9289
9290                         dm_new_crtc_state->stream = new_stream;
9291
9292                         dc_stream_retain(new_stream);
9293
9294                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
9295                                                 crtc->base.id);
9296
9297                         if (dc_add_stream_to_ctx(
9298                                         dm->dc,
9299                                         dm_state->context,
9300                                         dm_new_crtc_state->stream) != DC_OK) {
9301                                 ret = -EINVAL;
9302                                 goto fail;
9303                         }
9304
9305                         *lock_and_validation_needed = true;
9306                 }
9307         }
9308
9309 skip_modeset:
9310         /* Release extra reference */
9311         if (new_stream)
9312                 dc_stream_release(new_stream);
9313
9314         /*
9315          * We want to do dc stream updates that do not require a
9316          * full modeset below.
9317          */
9318         if (!(enable && aconnector && new_crtc_state->active))
9319                 return 0;
9320         /*
9321          * Given above conditions, the dc state cannot be NULL because:
9322          * 1. We're in the process of enabling CRTCs (just been added
9323          *    to the dc context, or already is on the context)
9324          * 2. Has a valid connector attached, and
9325          * 3. Is currently active and enabled.
9326          * => The dc stream state currently exists.
9327          */
9328         BUG_ON(dm_new_crtc_state->stream == NULL);
9329
9330         /* Scaling or underscan settings */
9331         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
9332                 update_stream_scaling_settings(
9333                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9334
9335         /* ABM settings */
9336         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9337
9338         /*
9339          * Color management settings. We also update color properties
9340          * when a modeset is needed, to ensure it gets reprogrammed.
9341          */
9342         if (dm_new_crtc_state->base.color_mgmt_changed ||
9343             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9344                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9345                 if (ret)
9346                         goto fail;
9347         }
9348
9349         /* Update Freesync settings. */
9350         get_freesync_config_for_crtc(dm_new_crtc_state,
9351                                      dm_new_conn_state);
9352
9353         return ret;
9354
9355 fail:
9356         if (new_stream)
9357                 dc_stream_release(new_stream);
9358         return ret;
9359 }
9360
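/*
 * should_reset_plane() - decide whether a plane must be removed and re-added
 *
 * Conservatively returns true whenever anything on the plane's CRTC changes
 * that could affect z-ordering, scaling, blending or bandwidth, since the
 * current DC architecture then requires removing and recreating every plane
 * on the stream.
 */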
9361 static bool should_reset_plane(struct drm_atomic_state *state,
9362                                struct drm_plane *plane,
9363                                struct drm_plane_state *old_plane_state,
9364                                struct drm_plane_state *new_plane_state)
9365 {
9366         struct drm_plane *other;
9367         struct drm_plane_state *old_other_state, *new_other_state;
9368         struct drm_crtc_state *new_crtc_state;
9369         int i;
9370
9371         /*
9372          * TODO: Remove this hack once the checks below are sufficient
9373          * to determine when we need to reset all the planes on
9374          * the stream.
9375          */
9376         if (state->allow_modeset)
9377                 return true;
9378
9379         /* Exit early if we know that we're adding or removing the plane. */
9380         if (old_plane_state->crtc != new_plane_state->crtc)
9381                 return true;
9382
9383         /* old crtc == new_crtc == NULL, plane not in context. */
9384         if (!new_plane_state->crtc)
9385                 return false;
9386
9387         new_crtc_state =
9388                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9389
9390         if (!new_crtc_state)
9391                 return true;
9392
9393         /* CRTC Degamma changes currently require us to recreate planes. */
9394         if (new_crtc_state->color_mgmt_changed)
9395                 return true;
9396
9397         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9398                 return true;
9399
9400         /*
9401          * If there are any new primary or overlay planes being added or
9402          * removed then the z-order can potentially change. To ensure
9403          * correct z-order and pipe acquisition the current DC architecture
9404          * requires us to remove and recreate all existing planes.
9405          *
9406          * TODO: Come up with a more elegant solution for this.
9407          */
9408         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9409                 struct amdgpu_framebuffer *old_afb, *new_afb;
9410                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9411                         continue;
9412
9413                 if (old_other_state->crtc != new_plane_state->crtc &&
9414                     new_other_state->crtc != new_plane_state->crtc)
9415                         continue;
9416
9417                 if (old_other_state->crtc != new_other_state->crtc)
9418                         return true;
9419
9420                 /* Src/dst size and scaling updates. */
9421                 if (old_other_state->src_w != new_other_state->src_w ||
9422                     old_other_state->src_h != new_other_state->src_h ||
9423                     old_other_state->crtc_w != new_other_state->crtc_w ||
9424                     old_other_state->crtc_h != new_other_state->crtc_h)
9425                         return true;
9426
9427                 /* Rotation / mirroring updates. */
9428                 if (old_other_state->rotation != new_other_state->rotation)
9429                         return true;
9430
9431                 /* Blending updates. */
9432                 if (old_other_state->pixel_blend_mode !=
9433                     new_other_state->pixel_blend_mode)
9434                         return true;
9435
9436                 /* Alpha updates. */
9437                 if (old_other_state->alpha != new_other_state->alpha)
9438                         return true;
9439
9440                 /* Colorspace changes. */
9441                 if (old_other_state->color_range != new_other_state->color_range ||
9442                     old_other_state->color_encoding != new_other_state->color_encoding)
9443                         return true;
9444
9445                 /* Framebuffer checks fall at the end. */
9446                 if (!old_other_state->fb || !new_other_state->fb)
9447                         continue;
9448
9449                 /* Pixel format changes can require bandwidth updates. */
9450                 if (old_other_state->fb->format != new_other_state->fb->format)
9451                         return true;
9452
9453                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9454                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9455
9456                 /* Tiling and DCC changes also require bandwidth updates. */
9457                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9458                     old_afb->base.modifier != new_afb->base.modifier)
9459                         return true;
9460         }
9461
9462         return false;
9463 }
9464
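/*
 * dm_check_cursor_fb() - validate a framebuffer for the cursor plane: it must
 * fit the hardware cursor limits, be unscaled, use a supported pitch, and be
 * linear when no format modifier is supplied.
 */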
9465 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9466                               struct drm_plane_state *new_plane_state,
9467                               struct drm_framebuffer *fb)
9468 {
9469         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9470         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9471         unsigned int pitch;
9472         bool linear;
9473
9474         if (fb->width > new_acrtc->max_cursor_width ||
9475             fb->height > new_acrtc->max_cursor_height) {
9476                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9477                                  fb->width,
9478                                  fb->height);
9479                 return -EINVAL;
9480         }
9481         if (new_plane_state->src_w != fb->width << 16 ||
9482             new_plane_state->src_h != fb->height << 16) {
9483                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9484                 return -EINVAL;
9485         }
9486
9487         /* Pitch in pixels */
9488         pitch = fb->pitches[0] / fb->format->cpp[0];
9489
9490         if (fb->width != pitch) {
9491                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
9492                                  fb->width, pitch);
9493                 return -EINVAL;
9494         }
9495
9496         switch (pitch) {
9497         case 64:
9498         case 128:
9499         case 256:
9500                 /* FB pitch is supported by cursor plane */
9501                 break;
9502         default:
9503                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9504                 return -EINVAL;
9505         }
9506
9507         /* Core DRM takes care of checking FB modifiers, so we only need to
9508          * check tiling flags when the FB doesn't have a modifier. */
9509         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9510                 if (adev->family < AMDGPU_FAMILY_AI) {
9511                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9512                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9513                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9514                 } else {
9515                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9516                 }
9517                 if (!linear) {
9518                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
9519                         return -EINVAL;
9520                 }
9521         }
9522
9523         return 0;
9524 }
9525
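/*
 * dm_update_plane_state() - mirror a DRM plane update into the DC context.
 * With enable == false the plane's dc_plane_state is removed from the context
 * and released; with enable == true a new dc_plane_state is created, filled
 * and attached. Sets *lock_and_validation_needed whenever the change requires
 * full DC validation.
 */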
9526 static int dm_update_plane_state(struct dc *dc,
9527                                  struct drm_atomic_state *state,
9528                                  struct drm_plane *plane,
9529                                  struct drm_plane_state *old_plane_state,
9530                                  struct drm_plane_state *new_plane_state,
9531                                  bool enable,
9532                                  bool *lock_and_validation_needed)
9533 {
9535         struct dm_atomic_state *dm_state = NULL;
9536         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9537         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9538         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9539         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9540         struct amdgpu_crtc *new_acrtc;
9541         bool needs_reset;
9542         int ret = 0;
9543
9545         new_plane_crtc = new_plane_state->crtc;
9546         old_plane_crtc = old_plane_state->crtc;
9547         dm_new_plane_state = to_dm_plane_state(new_plane_state);
9548         dm_old_plane_state = to_dm_plane_state(old_plane_state);
9549
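        /*
         * The cursor plane is not carried in the DC plane list; it is
         * programmed through a separate update path, so only basic position
         * and framebuffer checks are done here.
         */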
9550         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9551                 if (!enable || !new_plane_crtc ||
9552                         drm_atomic_plane_disabling(plane->state, new_plane_state))
9553                         return 0;
9554
9555                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9556
9557                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9558                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9559                         return -EINVAL;
9560                 }
9561
9562                 if (new_plane_state->fb) {
9563                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9564                                                  new_plane_state->fb);
9565                         if (ret)
9566                                 return ret;
9567                 }
9568
9569                 return 0;
9570         }
9571
9572         needs_reset = should_reset_plane(state, plane, old_plane_state,
9573                                          new_plane_state);
9574
9575         /* Remove any changed/removed planes */
9576         if (!enable) {
9577                 if (!needs_reset)
9578                         return 0;
9579
9580                 if (!old_plane_crtc)
9581                         return 0;
9582
9583                 old_crtc_state = drm_atomic_get_old_crtc_state(
9584                                 state, old_plane_crtc);
9585                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9586
9587                 if (!dm_old_crtc_state->stream)
9588                         return 0;
9589
9590                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9591                                 plane->base.id, old_plane_crtc->base.id);
9592
9593                 ret = dm_atomic_get_state(state, &dm_state);
9594                 if (ret)
9595                         return ret;
9596
9597                 if (!dc_remove_plane_from_context(
9598                                 dc,
9599                                 dm_old_crtc_state->stream,
9600                                 dm_old_plane_state->dc_state,
9601                                 dm_state->context)) {
9602                         return -EINVAL;
9603                 }
9604
9607                 dc_plane_state_release(dm_old_plane_state->dc_state);
9608                 dm_new_plane_state->dc_state = NULL;
9609
9610                 *lock_and_validation_needed = true;
9611
9612         } else { /* Add new planes */
9613                 struct dc_plane_state *dc_new_plane_state;
9614
9615                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9616                         return 0;
9617
9618                 if (!new_plane_crtc)
9619                         return 0;
9620
9621                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9622                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9623
9624                 if (!dm_new_crtc_state->stream)
9625                         return 0;
9626
9627                 if (!needs_reset)
9628                         return 0;
9629
9630                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9631                 if (ret)
9632                         return ret;
9633
9634                 WARN_ON(dm_new_plane_state->dc_state);
9635
9636                 dc_new_plane_state = dc_create_plane_state(dc);
9637                 if (!dc_new_plane_state)
9638                         return -ENOMEM;
9639
9640                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9641                                 plane->base.id, new_plane_crtc->base.id);
9642
9643                 ret = fill_dc_plane_attributes(
9644                         drm_to_adev(new_plane_crtc->dev),
9645                         dc_new_plane_state,
9646                         new_plane_state,
9647                         new_crtc_state);
9648                 if (ret) {
9649                         dc_plane_state_release(dc_new_plane_state);
9650                         return ret;
9651                 }
9652
9653                 ret = dm_atomic_get_state(state, &dm_state);
9654                 if (ret) {
9655                         dc_plane_state_release(dc_new_plane_state);
9656                         return ret;
9657                 }
9658
9659                 /*
9660                  * Any atomic check errors that occur after this will
9661                  * not need a release. The plane state will be attached
9662                  * to the stream, and therefore part of the atomic
9663                  * state. It'll be released when the atomic state is
9664                  * cleaned.
9665                  */
9666                 if (!dc_add_plane_to_context(
9667                                 dc,
9668                                 dm_new_crtc_state->stream,
9669                                 dc_new_plane_state,
9670                                 dm_state->context)) {
9671
9672                         dc_plane_state_release(dc_new_plane_state);
9673                         return -EINVAL;
9674                 }
9675
9676                 dm_new_plane_state->dc_state = dc_new_plane_state;
9677
9678                 /* Tell DC to do a full surface update every time there
9679                  * is a plane change. Inefficient, but works for now.
9680                  */
9681                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9682
9683                 *lock_and_validation_needed = true;
9684         }
9685
9687         return ret;
9688 }
9689
9690 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9691                                 struct drm_crtc *crtc,
9692                                 struct drm_crtc_state *new_crtc_state)
9693 {
9694         struct drm_plane_state *new_cursor_state, *new_primary_state;
9695         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9696
9697         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9698          * cursor per pipe but it's going to inherit the scaling and
9699          * positioning from the underlying pipe. Check the cursor plane's
9700          * blending properties match the primary plane's. */
9701
9702         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9703         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9704         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb)
9705                 return 0;
9707
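        /*
         * src_w/src_h are 16.16 fixed point, so >> 16 yields whole source
         * pixels; multiplying the CRTC size by 1000 compares the scaling
         * ratios with three decimal places of precision.
         */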
9708         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9709                          (new_cursor_state->src_w >> 16);
9710         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9711                          (new_cursor_state->src_h >> 16);
9712
9713         primary_scale_w = new_primary_state->crtc_w * 1000 /
9714                          (new_primary_state->src_w >> 16);
9715         primary_scale_h = new_primary_state->crtc_h * 1000 /
9716                          (new_primary_state->src_h >> 16);
9717
9718         if (cursor_scale_w != primary_scale_w ||
9719             cursor_scale_h != primary_scale_h) {
9720                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9721                 return -EINVAL;
9722         }
9723
9724         return 0;
9725 }
9726
9727 #if defined(CONFIG_DRM_AMD_DC_DCN)
9728 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9729 {
9730         struct drm_connector *connector;
9731         struct drm_connector_state *conn_state;
9732         struct amdgpu_dm_connector *aconnector = NULL;
9733         int i;
9734         for_each_new_connector_in_state(state, connector, conn_state, i) {
9735                 if (conn_state->crtc != crtc)
9736                         continue;
9737
9738                 aconnector = to_amdgpu_dm_connector(connector);
9739                 if (!aconnector->port || !aconnector->mst_port)
9740                         aconnector = NULL;
9741                 else
9742                         break;
9743         }
9744
9745         if (!aconnector)
9746                 return 0;
9747
9748         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9749 }
9750 #endif
9751
9752 /**
9753  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
9754  * @dev: The DRM device
9755  * @state: The atomic state to commit
9756  *
9757  * Validate that the given atomic state is programmable by DC into hardware.
9758  * This involves constructing a &struct dc_state reflecting the new hardware
9759  * state we wish to commit, then querying DC to see if it is programmable. It's
9760  * important not to modify the existing DC state. Otherwise, atomic_check
9761  * may unexpectedly commit hardware changes.
9762  *
9763  * When validating the DC state, it's important that the right locks are
9764  * acquired. For a full update, which removes/adds/updates streams on one
9765  * CRTC while flipping on another CRTC, acquiring the global lock guarantees
9766  * that any such commit will wait for completion of any outstanding flip
9767  * using DRM's synchronization events.
9768  *
9769  * Note that DM adds the affected connectors for all CRTCs in state, when that
9770  * might not seem necessary. This is because DC stream creation requires the
9771  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9772  * be possible but non-trivial - a possible TODO item.
9773  *
9774  * Return: 0 on success, or a negative error code if validation failed.
9775  */
9776 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9777                                   struct drm_atomic_state *state)
9778 {
9779         struct amdgpu_device *adev = drm_to_adev(dev);
9780         struct dm_atomic_state *dm_state = NULL;
9781         struct dc *dc = adev->dm.dc;
9782         struct drm_connector *connector;
9783         struct drm_connector_state *old_con_state, *new_con_state;
9784         struct drm_crtc *crtc;
9785         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9786         struct drm_plane *plane;
9787         struct drm_plane_state *old_plane_state, *new_plane_state;
9788         enum dc_status status;
9789         int ret, i;
9790         bool lock_and_validation_needed = false;
9791         struct dm_crtc_state *dm_old_crtc_state;
9792
9793         trace_amdgpu_dm_atomic_check_begin(state);
9794
9795         ret = drm_atomic_helper_check_modeset(dev, state);
9796         if (ret)
9797                 goto fail;
9798
9799         /* Check connector changes */
9800         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9801                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9802                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9803
9804                 /* Skip connectors that are disabled or part of modeset already. */
9805                 if (!old_con_state->crtc && !new_con_state->crtc)
9806                         continue;
9807
9808                 if (!new_con_state->crtc)
9809                         continue;
9810
9811                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9812                 if (IS_ERR(new_crtc_state)) {
9813                         ret = PTR_ERR(new_crtc_state);
9814                         goto fail;
9815                 }
9816
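                /*
                 * Flag connectors_changed so that an ABM level change takes
                 * the modeset path and is programmed into the new stream.
                 */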
9817                 if (dm_old_con_state->abm_level !=
9818                     dm_new_con_state->abm_level)
9819                         new_crtc_state->connectors_changed = true;
9820         }
9821
9822 #if defined(CONFIG_DRM_AMD_DC_DCN)
9823         if (dc_resource_is_dsc_encoding_supported(dc)) {
9824                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9825                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9826                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9827                                 if (ret)
9828                                         goto fail;
9829                         }
9830                 }
9831         }
9832 #endif
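        /*
         * Pull the affected connectors and planes into the state for any
         * CRTC whose change requires DC revalidation.
         */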
9833         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9834                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9835
9836                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9837                     !new_crtc_state->color_mgmt_changed &&
9838                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9839                     !dm_old_crtc_state->dsc_force_changed)
9840                         continue;
9841
9842                 if (!new_crtc_state->enable)
9843                         continue;
9844
9845                 ret = drm_atomic_add_affected_connectors(state, crtc);
9846                 if (ret)
9847                         goto fail;
9848
9849                 ret = drm_atomic_add_affected_planes(state, crtc);
9850                 if (ret)
9851                         goto fail;
9852
9853                 if (dm_old_crtc_state->dsc_force_changed)
9854                         new_crtc_state->mode_changed = true;
9855         }
9856
9857         /*
9858          * Add all primary and overlay planes on the CRTC to the state
9859          * whenever a plane is enabled to maintain correct z-ordering
9860          * and to enable fast surface updates.
9861          */
9862         drm_for_each_crtc(crtc, dev) {
9863                 bool modified = false;
9864
9865                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9866                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9867                                 continue;
9868
9869                         if (new_plane_state->crtc == crtc ||
9870                             old_plane_state->crtc == crtc) {
9871                                 modified = true;
9872                                 break;
9873                         }
9874                 }
9875
9876                 if (!modified)
9877                         continue;
9878
9879                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9880                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9881                                 continue;
9882
9883                         new_plane_state =
9884                                 drm_atomic_get_plane_state(state, plane);
9885
9886                         if (IS_ERR(new_plane_state)) {
9887                                 ret = PTR_ERR(new_plane_state);
9888                                 goto fail;
9889                         }
9890                 }
9891         }
9892
9893         /* Remove existing planes if they are modified */
9894         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9895                 ret = dm_update_plane_state(dc, state, plane,
9896                                             old_plane_state,
9897                                             new_plane_state,
9898                                             false,
9899                                             &lock_and_validation_needed);
9900                 if (ret)
9901                         goto fail;
9902         }
9903
9904         /* Disable all crtcs which require disable */
9905         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9906                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9907                                            old_crtc_state,
9908                                            new_crtc_state,
9909                                            false,
9910                                            &lock_and_validation_needed);
9911                 if (ret)
9912                         goto fail;
9913         }
9914
9915         /* Enable all crtcs which require enable */
9916         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9917                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9918                                            old_crtc_state,
9919                                            new_crtc_state,
9920                                            true,
9921                                            &lock_and_validation_needed);
9922                 if (ret)
9923                         goto fail;
9924         }
9925
9926         /* Add new/modified planes */
9927         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9928                 ret = dm_update_plane_state(dc, state, plane,
9929                                             old_plane_state,
9930                                             new_plane_state,
9931                                             true,
9932                                             &lock_and_validation_needed);
9933                 if (ret)
9934                         goto fail;
9935         }
9936
9937         /* Run this here since we want to validate the streams we created */
9938         ret = drm_atomic_helper_check_planes(dev, state);
9939         if (ret)
9940                 goto fail;
9941
9942         /* Check cursor planes scaling */
9943         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9944                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9945                 if (ret)
9946                         goto fail;
9947         }
9948
9949         if (state->legacy_cursor_update) {
9950                 /*
9951                  * This is a fast cursor update coming from the plane update
9952                  * helper, check if it can be done asynchronously for better
9953                  * performance.
9954                  */
9955                 state->async_update =
9956                         !drm_atomic_helper_async_check(dev, state);
9957
9958                 /*
9959                  * Skip the remaining global validation if this is an async
9960                  * update. Cursor updates can be done without affecting
9961                  * state or bandwidth calcs and this avoids the performance
9962                  * penalty of locking the private state object and
9963                  * allocating a new dc_state.
9964                  */
9965                 if (state->async_update)
9966                         return 0;
9967         }
9968
9969         /* Check scaling and underscan changes */
9970         /* TODO: Removed scaling changes validation due to inability to commit
9971          * a new stream into the context w/o causing a full reset. Need to
9972          * decide how to handle this.
9973          */
9974         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9975                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9976                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9977                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9978
9979                 /* Skip any modesets/resets */
9980                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9981                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9982                         continue;
9983
9984                 /* Skip anything that is not a scaling or underscan change */
9985                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9986                         continue;
9987
9988                 lock_and_validation_needed = true;
9989         }
9990
9991         /*
9992          * Streams and planes are reset when there are changes that affect
9993          * bandwidth. Anything that affects bandwidth needs to go through
9994          * DC global validation to ensure that the configuration can be applied
9995          * to hardware.
9996          *
9997          * We currently have to stall out here in atomic_check for outstanding
9998          * commits to finish in this case because our IRQ handlers reference
9999          * DRM state directly - we can end up disabling interrupts too early
10000          * if we don't.
10001          *
10002          * TODO: Remove this stall and drop DM state private objects.
10003          */
10004         if (lock_and_validation_needed) {
10005                 ret = dm_atomic_get_state(state, &dm_state);
10006                 if (ret)
10007                         goto fail;
10008
10009                 ret = do_aquire_global_lock(dev, state);
10010                 if (ret)
10011                         goto fail;
10012
10013 #if defined(CONFIG_DRM_AMD_DC_DCN)
10014                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
10015                         goto fail;
10016
10017                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
10018                 if (ret)
10019                         goto fail;
10020 #endif
10021
10022                 /*
10023                  * Perform validation of MST topology in the state:
10024                  * We need to perform MST atomic check before calling
10025                  * dc_validate_global_state(), or there is a chance
10026                  * to get stuck in an infinite loop and hang eventually.
10027                  */
10028                 ret = drm_dp_mst_atomic_check(state);
10029                 if (ret)
10030                         goto fail;
10031                 status = dc_validate_global_state(dc, dm_state->context, false);
10032                 if (status != DC_OK) {
10033                         DC_LOG_WARNING("DC global validation failure: %s (%d)\n",
10034                                        dc_status_to_str(status), status);
10035                         ret = -EINVAL;
10036                         goto fail;
10037                 }
10038         } else {
10039                 /*
10040                  * The commit is a fast update. Fast updates shouldn't change
10041                  * the DC context, affect global validation, and can have their
10042                  * commit work done in parallel with other commits not touching
10043                  * the same resource. If we have a new DC context as part of
10044                  * the DM atomic state from validation we need to free it and
10045                  * retain the existing one instead.
10046                  *
10047                  * Furthermore, since the DM atomic state only contains the DC
10048                  * context and can safely be annulled, we can free the state
10049                  * and clear the associated private object now to free
10050                  * some memory and avoid a possible use-after-free later.
10051                  */
10052
10053                 for (i = 0; i < state->num_private_objs; i++) {
10054                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10055
10056                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10057                                 int j = state->num_private_objs - 1;
10058
10059                                 dm_atomic_destroy_state(obj,
10060                                                 state->private_objs[i].state);
10061
10062                                 /* If i is not at the end of the array then the
10063                                  * last element needs to be moved to where i was
10064                                  * before the array can safely be truncated.
10065                                  */
10066                                 if (i != j)
10067                                         state->private_objs[i] =
10068                                                 state->private_objs[j];
10069
10070                                 state->private_objs[j].ptr = NULL;
10071                                 state->private_objs[j].state = NULL;
10072                                 state->private_objs[j].old_state = NULL;
10073                                 state->private_objs[j].new_state = NULL;
10074
10075                                 state->num_private_objs = j;
10076                                 break;
10077                         }
10078                 }
10079         }
10080
10081         /* Store the overall update type for use later in atomic check. */
10082         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10083                 struct dm_crtc_state *dm_new_crtc_state =
10084                         to_dm_crtc_state(new_crtc_state);
10085
10086                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10087                                                          UPDATE_TYPE_FULL :
10088                                                          UPDATE_TYPE_FAST;
10089         }
10090
10091         /* Must be success; any failure path jumps to the fail label. */
10092         WARN_ON(ret);
10093
10094         trace_amdgpu_dm_atomic_check_finish(state, ret);
10095
10096         return ret;
10097
10098 fail:
10099         if (ret == -EDEADLK)
10100                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10101         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10102                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10103         else
10104                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10105
10106         trace_amdgpu_dm_atomic_check_finish(state, ret);
10107
10108         return ret;
10109 }
10110
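/*
 * is_dp_capable_without_timing_msa() - read DP_DOWN_STREAM_PORT_COUNT from
 * DPCD and test the MSA-timing-par-ignored bit, which the caller uses as a
 * prerequisite for DP FreeSync support.
 */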
10111 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10112                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10113 {
10114         uint8_t dpcd_data;
10115         bool capable = false;
10116
10117         if (amdgpu_dm_connector->dc_link &&
10118                 dm_helpers_dp_read_dpcd(
10119                                 NULL,
10120                                 amdgpu_dm_connector->dc_link,
10121                                 DP_DOWN_STREAM_PORT_COUNT,
10122                                 &dpcd_data,
10123                                 sizeof(dpcd_data))) {
10124                 capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10125         }
10126
10127         return capable;
10128 }
10129
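/*
 * parse_edid_cea() - stream a CEA extension block to the DMCU EDID parser in
 * 8-byte chunks and, once the block is fully sent, retrieve the AMD VSDB
 * FreeSync refresh range from the reply.
 */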
10130 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10131                 uint8_t *edid_ext, int len,
10132                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10133 {
10134         int i;
10135         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10136         struct dc *dc = adev->dm.dc;
10137
10138         /* send extension block to DMCU for parsing */
10139         for (i = 0; i < len; i += 8) {
10140                 bool res;
10141                 int offset;
10142
10143                 /* send 8 bytes at a time */
10144                 if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8))
10145                         return false;
10146
10147                 if (i + 8 == len) {
10148                         /* EDID block send completed, expect the parse result */
10149                         int version, min_rate, max_rate;
10150
10151                         res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate);
10152                         if (res) {
10153                                 /* amd vsdb found */
10154                                 vsdb_info->freesync_supported = 1;
10155                                 vsdb_info->amd_vsdb_version = version;
10156                                 vsdb_info->min_refresh_rate_hz = min_rate;
10157                                 vsdb_info->max_refresh_rate_hz = max_rate;
10158                                 return true;
10159                         }
10160                         /* not amd vsdb */
10161                         return false;
10162                 }
10163
10164                 /* check for ack */
10165                 res = dc_edid_parser_recv_cea_ack(dc, &offset);
10166                 if (!res)
10167                         return false;
10168         }
10169
10170         return false;
10171 }
10172
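/*
 * parse_hdmi_amd_vsdb() - locate the CEA extension in the EDID and hand it to
 * parse_edid_cea() to look for an AMD VSDB. Returns the CEA extension index
 * on success, -ENODEV otherwise.
 */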
10173 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10174                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10175 {
10176         uint8_t *edid_ext = NULL;
10177         int i;
10178         bool valid_vsdb_found = false;
10179
10180         /*----- drm_find_cea_extension() -----*/
10181         /* No EDID or EDID extensions */
10182         if (edid == NULL || edid->extensions == 0)
10183                 return -ENODEV;
10184
10185         /* Find CEA extension */
10186         for (i = 0; i < edid->extensions; i++) {
10187                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10188                 if (edid_ext[0] == CEA_EXT)
10189                         break;
10190         }
10191
10192         if (i == edid->extensions)
10193                 return -ENODEV;
10194
10195         /*----- cea_db_offsets() -----*/
10196         if (edid_ext[0] != CEA_EXT)
10197                 return -ENODEV;
10198
10199         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10200
10201         return valid_vsdb_found ? i : -ENODEV;
10202 }
10203
10204 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10205                                         struct edid *edid)
10206 {
10207         int i = 0;
10208         struct detailed_timing *timing;
10209         struct detailed_non_pixel *data;
10210         struct detailed_data_monitor_range *range;
10211         struct amdgpu_dm_connector *amdgpu_dm_connector =
10212                         to_amdgpu_dm_connector(connector);
10213         struct dm_connector_state *dm_con_state = NULL;
10214
10215         struct drm_device *dev = connector->dev;
10216         struct amdgpu_device *adev = drm_to_adev(dev);
10217         bool freesync_capable = false;
10218         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10219
10220         if (!connector->state) {
10221                 DRM_ERROR("%s - Connector has no state\n", __func__);
10222                 goto update;
10223         }
10224
10225         if (!edid) {
10226                 dm_con_state = to_dm_connector_state(connector->state);
10227
10228                 amdgpu_dm_connector->min_vfreq = 0;
10229                 amdgpu_dm_connector->max_vfreq = 0;
10230                 amdgpu_dm_connector->pixel_clock_mhz = 0;
10231
10232                 goto update;
10233         }
10234
10235         dm_con_state = to_dm_connector_state(connector->state);
10236
10237         if (!amdgpu_dm_connector->dc_sink) {
10238                 DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
10239                 goto update;
10240         }
10241         if (!adev->dm.freesync_module)
10242                 goto update;
10243
10245         if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10246                 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
10247                 bool edid_check_required = false;
10248
10249                 if (edid) {
10250                         edid_check_required = is_dp_capable_without_timing_msa(
10251                                                 adev->dm.dc,
10252                                                 amdgpu_dm_connector);
10253                 }
10254
10255                 if (edid_check_required && (edid->version > 1 ||
10256                    (edid->version == 1 && edid->revision > 1))) {
10257                         for (i = 0; i < 4; i++) {
10258
10259                                 timing  = &edid->detailed_timings[i];
10260                                 data    = &timing->data.other_data;
10261                                 range   = &data->data.range;
10262                                 /*
10263                                  * Check if monitor has continuous frequency mode
10264                                  */
10265                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10266                                         continue;
10267                                 /*
10268                                  * Check for the range-limits flag only. If flags == 1,
10269                                  * no additional timing information is provided.
10270                                  * Default GTF, GTF secondary curve and CVT are not
10271                                  * supported.
10272                                  */
10273                                 if (range->flags != 1)
10274                                         continue;
10275
10276                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10277                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10278                                 amdgpu_dm_connector->pixel_clock_mhz =
10279                                         range->pixel_clock_mhz * 10;
10280
10281                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10282                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10283
10284                                 break;
10285                         }
10286
10287                         if (amdgpu_dm_connector->max_vfreq -
10288                             amdgpu_dm_connector->min_vfreq > 10) {
10289
10290                                 freesync_capable = true;
10291                         }
10292                 }
10293         } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10294                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10295                 if (i >= 0 && vsdb_info.freesync_supported) {
10296                         timing  = &edid->detailed_timings[i];
10297                         data    = &timing->data.other_data;
10298
10299                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10300                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10301                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10302                                 freesync_capable = true;
10303
10304                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10305                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10306                 }
10307         }
10308
10309 update:
10310         if (dm_con_state)
10311                 dm_con_state->freesync_capable = freesync_capable;
10312
10313         if (connector->vrr_capable_property)
10314                 drm_connector_set_vrr_capable_property(connector,
10315                                                        freesync_capable);
10316 }
10317
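/*
 * amdgpu_dm_set_psr_caps() - read the eDP sink's PSR capability from DPCD and
 * cache the supported PSR version in the link's settings.
 */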
10318 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
10319 {
10320         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
10321
10322         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
10323                 return;
10324         if (link->type == dc_connection_none)
10325                 return;
10326         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
10327                                         dpcd_data, sizeof(dpcd_data))) {
10328                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
10329
10330                 if (dpcd_data[0] == 0) {
10331                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
10332                         link->psr_settings.psr_feature_enabled = false;
10333                 } else {
10334                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
10335                         link->psr_settings.psr_feature_enabled = true;
10336                 }
10337
10338                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
10339         }
10340 }
10341
10342 /*
10343  * amdgpu_dm_link_setup_psr() - configure psr link
10344  * @stream: stream state
10345  *
10346  * Return: true on success
10347  */
10348 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
10349 {
10350         struct dc_link *link = NULL;
10351         struct psr_config psr_config = {0};
10352         struct psr_context psr_context = {0};
10353         bool ret = false;
10354
10355         if (stream == NULL)
10356                 return false;
10357
10358         link = stream->link;
10359
10360         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
10361
10362         if (psr_config.psr_version > 0) {
10363                 psr_config.psr_exit_link_training_required = 0x1;
10364                 psr_config.psr_frame_capture_indication_req = 0;
10365                 psr_config.psr_rfb_setup_time = 0x37;
10366                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
10367                 psr_config.allow_smu_optimizations = 0x0;
10368
10369                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
10370         }
10371
10372         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
10373
10374         return ret;
10375 }
10376
10377 /*
10378  * amdgpu_dm_psr_enable() - enable psr f/w
10379  * @stream: stream state
10380  *
10381  * Return: true on success
10382  */
10383 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
10384 {
10385         struct dc_link *link = stream->link;
10386         unsigned int vsync_rate_hz = 0;
10387         struct dc_static_screen_params params = {0};
10388         /* Calculate number of static frames before generating interrupt to
10389          * enter PSR.
10390          */
10391         /* Init fail-safe of 2 static frames */
10392         unsigned int num_frames_static = 2;
10393
10394         DRM_DEBUG_DRIVER("Enabling psr...\n");
10395
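        /* refresh rate (Hz) = pixel clock / (h_total * v_total) */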
10396         vsync_rate_hz = div64_u64(div64_u64(
10397                         stream->timing.pix_clk_100hz * 100,
10398                         stream->timing.v_total),
10399                         stream->timing.h_total);
10400
10401         /*
10402          * Round up: calculate the number of frames such that at least
10403          * 30 ms of time has passed.
10404          */
10405         if (vsync_rate_hz != 0) {
10406                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
10407                 num_frames_static = (30000 / frame_time_microsec) + 1;
10408         }
10409
10410         params.triggers.cursor_update = true;
10411         params.triggers.overlay_update = true;
10412         params.triggers.surface_update = true;
10413         params.num_frames = num_frames_static;
10414
10415         dc_stream_set_static_screen_params(link->ctx->dc,
10416                                            &stream, 1,
10417                                            &params);
10418
10419         return dc_link_set_psr_allow_active(link, true, false, false);
10420 }
10421
10422 /*
10423  * amdgpu_dm_psr_disable() - disable psr f/w
10424  * @stream:  stream state
10425  *
10426  * Return: true on success
10427  */
10428 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
10429 {
10430
10431         DRM_DEBUG_DRIVER("Disabling psr...\n");
10432
10433         return dc_link_set_psr_allow_active(stream->link, false, true, false);
10434 }
10435
10436 /*
10437  * amdgpu_dm_psr_disable_all() - disable psr f/w
10438  * if psr is enabled on any stream
10439  *
10440  * Return: true on success
10441  */
10442 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
10443 {
10444         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
10445         return dc_set_psr_allow_active(dm->dc, false);
10446 }
10447
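/*
 * amdgpu_dm_trigger_timing_sync() - apply the current force_timing_sync
 * setting to every stream in the DC state and retrigger inter-CRTC timing
 * synchronization.
 */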
10448 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10449 {
10450         struct amdgpu_device *adev = drm_to_adev(dev);
10451         struct dc *dc = adev->dm.dc;
10452         int i;
10453
10454         mutex_lock(&adev->dm.dc_lock);
10455         if (dc->current_state) {
10456                 for (i = 0; i < dc->current_state->stream_count; ++i)
10457                         dc->current_state->streams[i]
10458                                 ->triggered_crtc_reset.enabled =
10459                                 adev->dm.force_timing_sync;
10460
10461                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10462                 dc_trigger_sync(dc, dc->current_state);
10463         }
10464         mutex_unlock(&adev->dm.dc_lock);
10465 }
10466
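/*
 * Register accessors handed to DC: they route through CGS and emit
 * tracepoints so register traffic can be inspected while debugging.
 */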
10467 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10468                        uint32_t value, const char *func_name)
10469 {
10470 #ifdef DM_CHECK_ADDR_0
10471         if (address == 0) {
10472                 DC_ERR("invalid register write; address = 0\n");
10473                 return;
10474         }
10475 #endif
10476         cgs_write_register(ctx->cgs_device, address, value);
10477         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10478 }
10479
10480 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10481                           const char *func_name)
10482 {
10483         uint32_t value;
10484 #ifdef DM_CHECK_ADDR_0
10485         if (address == 0) {
10486                 DC_ERR("invalid register read; address = 0\n");
10487                 return 0;
10488         }
10489 #endif
10490
10491         if (ctx->dmub_srv &&
10492             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10493             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10494                 ASSERT(false);
10495                 return 0;
10496         }
10497
10498         value = cgs_read_register(ctx->cgs_device, address);
10499
10500         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10501
10502         return value;
10503 }