/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vblank counter of the CRTC's stream, or 0 on error.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

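/**
 * dm_crtc_get_scanoutpos() - Get the current scanout position for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to query
 * @vbl: returns vblank start in the low 16 bits, vblank end in the high 16 bits
 * @position: returns the vertical position in the low 16 bits, the horizontal
 *            position in the high 16 bits
 *
 * Return: 0 on success, -EINVAL for an out-of-range CRTC index. Also returns
 * 0 (without writing a position) when the CRTC has no stream attached.
 */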
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

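/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance. Falls back to the first CRTC, with a warning, when otg_inst is
 * -1; returns NULL when no CRTC matches.
 */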
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

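/* Return true if the CRTC has variable refresh rate active, in either the
 * variable or the fixed state.
 */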
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: the common IRQ parameters, used to look up the CRTC
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

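/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * In VRR mode the core vblank handling is deferred to this handler, which
 * runs after the end of the front-porch where vblank timestamps are valid.
 * It also performs below-the-range (BTR) FreeSync processing on pre-DCE12
 * ASICs.
 */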
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only once the front-porch has ended. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc_state->stream,
				    &acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      amdgpu_dm_vrr_active(acrtc_state),
		      acrtc_state->active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!amdgpu_dm_vrr_active(acrtc_state))
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc_state->stream,
					     &acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
					   &acrtc_state->vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc_state->active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

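/*
 * Audio component callback: copy the ELD of the connector driving the given
 * port into @buf. Returns the ELD size in bytes and sets *enabled when a
 * matching connector is found.
 */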
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

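/*
 * Initialize the DMUB service hardware: copy the firmware and VBIOS into the
 * reserved framebuffer windows, clear the mailbox, trace buffer and firmware
 * state windows, program the hardware and wait for the firmware to
 * auto-load. Returns 0 when DMUB is not supported on the ASIC.
 */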
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

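/*
 * Create and initialize the display manager: DM IRQ support, the DC core,
 * DMUB, the freesync and color management modules, HDCP (where supported)
 * and, finally, the DRM-facing device state including vblank support.
 */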
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual number of CRTCs in use */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc && adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

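/*
 * Request and validate the DMCU firmware for ASICs that need it and register
 * its ERAM and INTV sections with the PSP loader. ASICs without DMCU firmware
 * return 0, and a missing firmware file is deliberately not treated as an
 * error.
 */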
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

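/*
 * Software-side DMUB setup: request and validate the firmware, create the
 * DMUB service, size its memory regions, back them with a VRAM allocation
 * (TODO: move to GART) and compute the per-window framebuffer info consumed
 * later by dm_dmub_hw_init().
 */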
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;

	/* Record the firmware version before it is logged below. */
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes);
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

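/*
 * Walk all connectors and enable MST topology management on every DP MST
 * branch link. A link that fails to start MST is downgraded to a single
 * display connection. Returns the last error from
 * drm_dp_mst_topology_mgr_set_mst(), or 0.
 */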
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

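/*
 * Late init: program the DMCU IRAM with backlight ramping parameters and a
 * linear backlight LUT (with a 1% minimum level for ABM), then kick off MST
 * link detection on all connectors.
 */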
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = false;

	if (!adev->dm.fw_dmcu)
		return detect_mst_link_for_all_connectors(adev->ddev);

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction. Don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

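/*
 * Suspend or resume the DP MST topology managers for S3. If a manager fails
 * to resume, MST is torn down on that link and a hotplug event is sent so
 * that userspace rescans the topology.
 */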
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

1408 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1409 {
1410         struct smu_context *smu = &adev->smu;
1411         int ret = 0;
1412
1413         if (!is_support_sw_smu(adev))
1414                 return 0;
1415
1416         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
1417          * depends on the Windows driver dc implementation.
1418          * For Navi1x, the clock settings of the dcn watermarks are fixed. The
1419          * settings should be passed to smu during boot up and resume from s3.
1420          * Boot up: dc calculates the dcn watermark clock settings within
1421          * dc_create, dcn20_resource_construct,
1422          * then calls the pplib functions below to pass the settings to smu:
1423          * smu_set_watermarks_for_clock_ranges
1424          * smu_set_watermarks_table
1425          * navi10_set_watermarks_table
1426          * smu_write_watermarks_table
1427          *
1428          * For Renoir, the clock settings of the dcn watermarks are also fixed
1429          * values. dc has implemented a different flow for the Windows driver:
1430          * dc_hardware_init / dc_set_power_state
1431          * dcn10_init_hw
1432          * notify_wm_ranges
1433          * set_wm_ranges
1434          * -- Linux
1435          * smu_set_watermarks_for_clock_ranges
1436          * renoir_set_watermarks_table
1437          * smu_write_watermarks_table
1438          *
1439          * For Linux,
1440          * dc_hardware_init -> amdgpu_dm_init
1441          * dc_set_power_state --> dm_resume
1442          *
1443          * Therefore, this function applies to Navi10/12/14 but not Renoir.
1444          */
1446         switch (adev->asic_type) {
1447         case CHIP_NAVI10:
1448         case CHIP_NAVI14:
1449         case CHIP_NAVI12:
1450                 break;
1451         default:
1452                 return 0;
1453         }
1454
1455         mutex_lock(&smu->mutex);
1456
1457         /* pass data to smu controller */
1458         if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1459                         !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1460                 ret = smu_write_watermarks_table(smu);
1461
1462                 if (ret) {
1463                         mutex_unlock(&smu->mutex);
1464                         DRM_ERROR("Failed to update WMTABLE!\n");
1465                         return ret;
1466                 }
1467                 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1468         }
1469
1470         mutex_unlock(&smu->mutex);
1471
1472         return 0;
1473 }
1474
1475 /**
1476  * dm_hw_init() - Initialize DC device
1477  * @handle: The base driver device containing the amdgpu_dm device.
1478  *
1479  * Initialize the &struct amdgpu_display_manager device. This involves calling
1480  * the initializers of each DM component, then populating the struct with them.
1481  *
1482  * Although the function implies hardware initialization, both hardware and
1483  * software are initialized here. Splitting them out to their relevant init
1484  * hooks is a future TODO item.
1485  *
1486  * Some notable things that are initialized here:
1487  *
1488  * - Display Core, both software and hardware
1489  * - DC modules that we need (freesync and color management)
1490  * - DRM software states
1491  * - Interrupt sources and handlers
1492  * - Vblank support
1493  * - Debug FS entries, if enabled
1494  */
1495 static int dm_hw_init(void *handle)
1496 {
1497         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1498         /* Create DAL display manager */
1499         amdgpu_dm_init(adev);
1500         amdgpu_dm_hpd_init(adev);
1501
1502         return 0;
1503 }
1504
1505 /**
1506  * dm_hw_fini() - Teardown DC device
1507  * @handle: The base driver device containing the amdgpu_dm device.
1508  *
1509  * Teardown components within &struct amdgpu_display_manager that require
1510  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1511  * were loaded. Also flush IRQ workqueues and disable them.
1512  */
1513 static int dm_hw_fini(void *handle)
1514 {
1515         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1516
1517         amdgpu_dm_hpd_fini(adev);
1518
1519         amdgpu_dm_irq_fini(adev);
1520         amdgpu_dm_fini(adev);
1521         return 0;
1522 }
1523
1524 static int dm_suspend(void *handle)
1525 {
1526         struct amdgpu_device *adev = handle;
1527         struct amdgpu_display_manager *dm = &adev->dm;
1528
1529         WARN_ON(adev->dm.cached_state);
1530         adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
1531
1532         s3_handle_mst(adev->ddev, true);
1533
1534         amdgpu_dm_irq_suspend(adev);
1535
1536
1537         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1538
1539         return 0;
1540 }
1541
1542 static struct amdgpu_dm_connector *
1543 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1544                                              struct drm_crtc *crtc)
1545 {
1546         uint32_t i;
1547         struct drm_connector_state *new_con_state;
1548         struct drm_connector *connector;
1549         struct drm_crtc *crtc_from_state;
1550
1551         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1552                 crtc_from_state = new_con_state->crtc;
1553
1554                 if (crtc_from_state == crtc)
1555                         return to_amdgpu_dm_connector(connector);
1556         }
1557
1558         return NULL;
1559 }
1560
1561 static void emulated_link_detect(struct dc_link *link)
1562 {
1563         struct dc_sink_init_data sink_init_data = { 0 };
1564         struct display_sink_capability sink_caps = { 0 };
1565         enum dc_edid_status edid_status;
1566         struct dc_context *dc_ctx = link->ctx;
1567         struct dc_sink *sink = NULL;
1568         struct dc_sink *prev_sink = NULL;
1569
1570         link->type = dc_connection_none;
1571         prev_sink = link->local_sink;
1572
1573         if (prev_sink != NULL)
1574                 dc_sink_retain(prev_sink);
1575
1576         switch (link->connector_signal) {
1577         case SIGNAL_TYPE_HDMI_TYPE_A: {
1578                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1579                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1580                 break;
1581         }
1582
1583         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1584                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1585                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1586                 break;
1587         }
1588
1589         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1590                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1591                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1592                 break;
1593         }
1594
1595         case SIGNAL_TYPE_LVDS: {
1596                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1597                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1598                 break;
1599         }
1600
1601         case SIGNAL_TYPE_EDP: {
1602                 sink_caps.transaction_type =
1603                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1604                 sink_caps.signal = SIGNAL_TYPE_EDP;
1605                 break;
1606         }
1607
1608         case SIGNAL_TYPE_DISPLAY_PORT: {
1609                 sink_caps.transaction_type =
1610                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1611                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1612                 break;
1613         }
1614
1615         default:
1616                 DC_ERROR("Invalid connector type! signal:%d\n",
1617                         link->connector_signal);
1618                 return;
1619         }
1620
1621         sink_init_data.link = link;
1622         sink_init_data.sink_signal = sink_caps.signal;
1623
1624         sink = dc_sink_create(&sink_init_data);
1625         if (!sink) {
1626                 DC_ERROR("Failed to create sink!\n");
1627                 return;
1628         }
1629
1630         /* dc_sink_create returns a new reference */
1631         link->local_sink = sink;
1632
1633         edid_status = dm_helpers_read_local_edid(
1634                         link->ctx,
1635                         link,
1636                         sink);
1637
1638         if (edid_status != EDID_OK)
1639                 DC_ERROR("Failed to read EDID\n");
1641 }
1642
1643 static int dm_resume(void *handle)
1644 {
1645         struct amdgpu_device *adev = handle;
1646         struct drm_device *ddev = adev->ddev;
1647         struct amdgpu_display_manager *dm = &adev->dm;
1648         struct amdgpu_dm_connector *aconnector;
1649         struct drm_connector *connector;
1650         struct drm_connector_list_iter iter;
1651         struct drm_crtc *crtc;
1652         struct drm_crtc_state *new_crtc_state;
1653         struct dm_crtc_state *dm_new_crtc_state;
1654         struct drm_plane *plane;
1655         struct drm_plane_state *new_plane_state;
1656         struct dm_plane_state *dm_new_plane_state;
1657         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1658         enum dc_connection_type new_connection_type = dc_connection_none;
1659         int i, r;
1660
1661         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
1662         dc_release_state(dm_state->context);
1663         dm_state->context = dc_create_state(dm->dc);
1664         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1665         dc_resource_state_construct(dm->dc, dm_state->context);
1666
1667         /* Before powering on DC we need to re-initialize DMUB. */
1668         r = dm_dmub_hw_init(adev);
1669         if (r)
1670                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1671
1672         /* power on hardware */
1673         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1674
1675         /* program HPD filter */
1676         dc_resume(dm->dc);
1677
1678         /*
1679          * Enable HPD Rx IRQ early; this should be done before setting the
1680          * mode, as short-pulse interrupts are used for MST.
1681          */
1682         amdgpu_dm_irq_resume_early(adev);
1683
1684         /* On resume we need to rewrite the MSTM control bits to enable MST */
1685         s3_handle_mst(ddev, false);
1686
1687         /* Do detection */
1688         drm_connector_list_iter_begin(ddev, &iter);
1689         drm_for_each_connector_iter(connector, &iter) {
1690                 aconnector = to_amdgpu_dm_connector(connector);
1691
1692                 /*
1693                  * This is the case when traversing through already created
1694                  * MST connectors; they should be skipped.
1695                  */
1696                 if (aconnector->mst_port)
1697                         continue;
1698
1699                 mutex_lock(&aconnector->hpd_lock);
1700                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1701                         DRM_ERROR("KMS: Failed to detect connector\n");
1702
1703                 if (aconnector->base.force && new_connection_type == dc_connection_none)
1704                         emulated_link_detect(aconnector->dc_link);
1705                 else
1706                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
1707
1708                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1709                         aconnector->fake_enable = false;
1710
1711                 if (aconnector->dc_sink)
1712                         dc_sink_release(aconnector->dc_sink);
1713                 aconnector->dc_sink = NULL;
1714                 amdgpu_dm_update_connector_after_detect(aconnector);
1715                 mutex_unlock(&aconnector->hpd_lock);
1716         }
1717         drm_connector_list_iter_end(&iter);
1718
1719         /* Force mode set in atomic commit */
1720         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
1721                 new_crtc_state->active_changed = true;
1722
1723         /*
1724          * atomic_check is expected to create the dc states. We need to release
1725          * them here, since they were duplicated as part of the suspend
1726          * procedure.
1727          */
1728         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
1729                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1730                 if (dm_new_crtc_state->stream) {
1731                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
1732                         dc_stream_release(dm_new_crtc_state->stream);
1733                         dm_new_crtc_state->stream = NULL;
1734                 }
1735         }
1736
1737         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
1738                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
1739                 if (dm_new_plane_state->dc_state) {
1740                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
1741                         dc_plane_state_release(dm_new_plane_state->dc_state);
1742                         dm_new_plane_state->dc_state = NULL;
1743                 }
1744         }
1745
1746         drm_atomic_helper_resume(ddev, dm->cached_state);
1747
1748         dm->cached_state = NULL;
1749
1750         amdgpu_dm_irq_resume_late(adev);
1751
1752         amdgpu_dm_smu_write_watermarks_table(adev);
1753
1754         return 0;
1755 }
1756
1757 /**
1758  * DOC: DM Lifecycle
1759  *
1760  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
1761  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
1762  * the base driver's device list to be initialized and torn down accordingly.
1763  *
1764  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
1765  */
1766
1767 static const struct amd_ip_funcs amdgpu_dm_funcs = {
1768         .name = "dm",
1769         .early_init = dm_early_init,
1770         .late_init = dm_late_init,
1771         .sw_init = dm_sw_init,
1772         .sw_fini = dm_sw_fini,
1773         .hw_init = dm_hw_init,
1774         .hw_fini = dm_hw_fini,
1775         .suspend = dm_suspend,
1776         .resume = dm_resume,
1777         .is_idle = dm_is_idle,
1778         .wait_for_idle = dm_wait_for_idle,
1779         .check_soft_reset = dm_check_soft_reset,
1780         .soft_reset = dm_soft_reset,
1781         .set_clockgating_state = dm_set_clockgating_state,
1782         .set_powergating_state = dm_set_powergating_state,
1783 };
1784
1785 const struct amdgpu_ip_block_version dm_ip_block =
1786 {
1787         .type = AMD_IP_BLOCK_TYPE_DCE,
1788         .major = 1,
1789         .minor = 0,
1790         .rev = 0,
1791         .funcs = &amdgpu_dm_funcs,
1792 };
1793
1794
1795 /**
1796  * DOC: atomic
1797  *
1798  * *WIP*
1799  */
1800
1801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
1802         .fb_create = amdgpu_display_user_framebuffer_create,
1803         .output_poll_changed = drm_fb_helper_output_poll_changed,
1804         .atomic_check = amdgpu_dm_atomic_check,
1805         .atomic_commit = amdgpu_dm_atomic_commit,
1806 };
1807
1808 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
1809         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
1810 };
1811
1812 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
1813 {
1814         u32 max_cll, min_cll, max, min, q, r;
1815         struct amdgpu_dm_backlight_caps *caps;
1816         struct amdgpu_display_manager *dm;
1817         struct drm_connector *conn_base;
1818         struct amdgpu_device *adev;
1819         static const u8 pre_computed_values[] = {
1820                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
1821                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
1822
1823         if (!aconnector || !aconnector->dc_link)
1824                 return;
1825
1826         conn_base = &aconnector->base;
1827         adev = conn_base->dev->dev_private;
1828         dm = &adev->dm;
1829         caps = &dm->backlight_caps;
1830         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
1831         caps->aux_support = false;
1832         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
1833         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
1834
1835         if (caps->ext_caps->bits.oled == 1 ||
1836             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
1837             caps->ext_caps->bits.hdr_aux_backlight_control == 1)
1838                 caps->aux_support = true;
1839
1840         /* From the specification (CTA-861-G), for calculating the maximum
1841          * luminance we need to use:
1842          *      Luminance = 50*2**(CV/32)
1843          * where CV is a one-byte value.
1844          * Calculating this expression directly would require floating-point
1845          * precision; to avoid that complexity, we take advantage of the fact
1846          * that CV is divided by a constant. From Euclid's division algorithm,
1847          * we know that CV can be written as: CV = 32*q + r. Substituting CV
1848          * in the luminance expression gives 50*(2**q)*(2**(r/32)), so we just
1849          * need to pre-compute the values of 50*2**(r/32). For pre-computing
1850          * them we used the following Ruby line:
1851          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
1852          * The results of the above expression can be verified against
1853          * pre_computed_values.
1854          */
1855         q = max_cll >> 5;
1856         r = max_cll % 32;
1857         max = (1 << q) * pre_computed_values[r];
1858
1859         // min luminance: maxLum * (CV/255)^2 / 100
1860         q = DIV_ROUND_CLOSEST(min_cll, 255);
1861         min = max * DIV_ROUND_CLOSEST((q * q), 100);
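        /*
         * Worked example (illustrative values, not from any particular
         * panel): for max_cll = 65, q = 65 >> 5 = 2 and r = 65 % 32 = 1,
         * so max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204 nits.
         * The min calculation above is intended as max * (CV/255)^2 / 100;
         * note that the integer DIV_ROUND_CLOSEST() steps only coarsely
         * approximate that expression.
         */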
1862
1863         caps->aux_max_input_signal = max;
1864         caps->aux_min_input_signal = min;
1865 }
1866
1867 void amdgpu_dm_update_connector_after_detect(
1868                 struct amdgpu_dm_connector *aconnector)
1869 {
1870         struct drm_connector *connector = &aconnector->base;
1871         struct drm_device *dev = connector->dev;
1872         struct dc_sink *sink;
1873
1874         /* MST handled by drm_mst framework */
1875         if (aconnector->mst_mgr.mst_state)
1876                 return;
1877
1878
1879         sink = aconnector->dc_link->local_sink;
1880         if (sink)
1881                 dc_sink_retain(sink);
1882
1883         /*
1884          * EDID-managed connectors get their first update only in the mode_valid
1885          * hook; the connector sink is then set to either a fake or a physical
1886          * sink, depending on the link status. Skip if already done during boot.
1887          */
1888         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
1889                         && aconnector->dc_em_sink) {
1890
1891                 /*
1892                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
1893                  * fake a stream, because on resume connector->sink is set to NULL.
1894                  */
1895                 mutex_lock(&dev->mode_config.mutex);
1896
1897                 if (sink) {
1898                         if (aconnector->dc_sink) {
1899                                 amdgpu_dm_update_freesync_caps(connector, NULL);
1900                                 /*
1901                                  * The retain and release below bump up the sink's
1902                                  * refcount: the link no longer points to it after
1903                                  * disconnect, so the next crtc-to-connector reshuffle
1904                                  * by UMD would otherwise hit an unwanted dc_sink release.
1905                                  */
1906                                 dc_sink_release(aconnector->dc_sink);
1907                         }
1908                         aconnector->dc_sink = sink;
1909                         dc_sink_retain(aconnector->dc_sink);
1910                         amdgpu_dm_update_freesync_caps(connector,
1911                                         aconnector->edid);
1912                 } else {
1913                         amdgpu_dm_update_freesync_caps(connector, NULL);
1914                         if (!aconnector->dc_sink) {
1915                                 aconnector->dc_sink = aconnector->dc_em_sink;
1916                                 dc_sink_retain(aconnector->dc_sink);
1917                         }
1918                 }
1919
1920                 mutex_unlock(&dev->mode_config.mutex);
1921
1922                 if (sink)
1923                         dc_sink_release(sink);
1924                 return;
1925         }
1926
1927         /*
1928          * TODO: temporary guard until a proper fix is found;
1929          * if this sink is an MST sink, we should not do anything.
1930          */
1931         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1932                 dc_sink_release(sink);
1933                 return;
1934         }
1935
1936         if (aconnector->dc_sink == sink) {
1937                 /*
1938                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
1939                  * Do nothing!!
1940                  */
1941                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1942                                 aconnector->connector_id);
1943                 if (sink)
1944                         dc_sink_release(sink);
1945                 return;
1946         }
1947
1948         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
1949                 aconnector->connector_id, aconnector->dc_sink, sink);
1950
1951         mutex_lock(&dev->mode_config.mutex);
1952
1953         /*
1954          * 1. Update status of the drm connector
1955          * 2. Send an event and let userspace tell us what to do
1956          */
1957         if (sink) {
1958                 /*
1959                  * TODO: check if we still need the S3 mode update workaround.
1960                  * If yes, put it here.
1961                  */
1962                 if (aconnector->dc_sink)
1963                         amdgpu_dm_update_freesync_caps(connector, NULL);
1964
1965                 aconnector->dc_sink = sink;
1966                 dc_sink_retain(aconnector->dc_sink);
1967                 if (sink->dc_edid.length == 0) {
1968                         aconnector->edid = NULL;
1969                         if (aconnector->dc_link->aux_mode) {
1970                                 drm_dp_cec_unset_edid(
1971                                         &aconnector->dm_dp_aux.aux);
1972                         }
1973                 } else {
1974                         aconnector->edid =
1975                                 (struct edid *)sink->dc_edid.raw_edid;
1976
1977                         drm_connector_update_edid_property(connector,
1978                                                            aconnector->edid);
1979
1980                         if (aconnector->dc_link->aux_mode)
1981                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
1982                                                     aconnector->edid);
1983                 }
1984
1985                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
1986                 update_connector_ext_caps(aconnector);
1987         } else {
1988                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
1989                 amdgpu_dm_update_freesync_caps(connector, NULL);
1990                 drm_connector_update_edid_property(connector, NULL);
1991                 aconnector->num_modes = 0;
1992                 dc_sink_release(aconnector->dc_sink);
1993                 aconnector->dc_sink = NULL;
1994                 aconnector->edid = NULL;
1995 #ifdef CONFIG_DRM_AMD_DC_HDCP
1996                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
1997                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1998                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1999 #endif
2000         }
2001
2002         mutex_unlock(&dev->mode_config.mutex);
2003
2004         if (sink)
2005                 dc_sink_release(sink);
2006 }
2007
2008 static void handle_hpd_irq(void *param)
2009 {
2010         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2011         struct drm_connector *connector = &aconnector->base;
2012         struct drm_device *dev = connector->dev;
2013         enum dc_connection_type new_connection_type = dc_connection_none;
2014 #ifdef CONFIG_DRM_AMD_DC_HDCP
2015         struct amdgpu_device *adev = dev->dev_private;
2016 #endif
2017
2018         /*
2019          * In case of failure or MST, no need to update the connector status
2020          * or notify the OS, since (in the MST case) MST does this in its own context.
2021          */
2022         mutex_lock(&aconnector->hpd_lock);
2023
2024 #ifdef CONFIG_DRM_AMD_DC_HDCP
2025         if (adev->dm.hdcp_workqueue)
2026                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2027 #endif
2028         if (aconnector->fake_enable)
2029                 aconnector->fake_enable = false;
2030
2031         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2032                 DRM_ERROR("KMS: Failed to detect connector\n");
2033
2034         if (aconnector->base.force && new_connection_type == dc_connection_none) {
2035                 emulated_link_detect(aconnector->dc_link);
2036
2037
2038                 drm_modeset_lock_all(dev);
2039                 dm_restore_drm_connector_state(dev, connector);
2040                 drm_modeset_unlock_all(dev);
2041
2042                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2043                         drm_kms_helper_hotplug_event(dev);
2044
2045         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2046                 amdgpu_dm_update_connector_after_detect(aconnector);
2047
2048
2049                 drm_modeset_lock_all(dev);
2050                 dm_restore_drm_connector_state(dev, connector);
2051                 drm_modeset_unlock_all(dev);
2052
2053                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2054                         drm_kms_helper_hotplug_event(dev);
2055         }
2056         mutex_unlock(&aconnector->hpd_lock);
2057
2058 }
2059
2060 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2061 {
2062         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2063         uint8_t dret;
2064         bool new_irq_handled = false;
2065         int dpcd_addr;
2066         int dpcd_bytes_to_read;
2067
2068         const int max_process_count = 30;
2069         int process_count = 0;
2070
2071         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2072
2073         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2074                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2075                 /* DPCD 0x200 - 0x201 for downstream IRQ */
2076                 dpcd_addr = DP_SINK_COUNT;
2077         } else {
2078                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2079                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2080                 dpcd_addr = DP_SINK_COUNT_ESI;
2081         }
2082
2083         dret = drm_dp_dpcd_read(
2084                 &aconnector->dm_dp_aux.aux,
2085                 dpcd_addr,
2086                 esi,
2087                 dpcd_bytes_to_read);
2088
2089         while (dret == dpcd_bytes_to_read &&
2090                 process_count < max_process_count) {
2091                 uint8_t retry;
2092                 dret = 0;
2093
2094                 process_count++;
2095
2096                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2097                 /* handle HPD short pulse irq */
2098                 if (aconnector->mst_mgr.mst_state)
2099                         drm_dp_mst_hpd_irq(
2100                                 &aconnector->mst_mgr,
2101                                 esi,
2102                                 &new_irq_handled);
2103
2104                 if (new_irq_handled) {
2105                         /* ACK at DPCD to notify downstream */
2106                         const int ack_dpcd_bytes_to_write =
2107                                 dpcd_bytes_to_read - 1;
2108
2109                         for (retry = 0; retry < 3; retry++) {
2110                                 uint8_t wret;
2111
2112                                 wret = drm_dp_dpcd_write(
2113                                         &aconnector->dm_dp_aux.aux,
2114                                         dpcd_addr + 1,
2115                                         &esi[1],
2116                                         ack_dpcd_bytes_to_write);
2117                                 if (wret == ack_dpcd_bytes_to_write)
2118                                         break;
2119                         }
2120
2121                         /* check if there is new irq to be handled */
2122                         dret = drm_dp_dpcd_read(
2123                                 &aconnector->dm_dp_aux.aux,
2124                                 dpcd_addr,
2125                                 esi,
2126                                 dpcd_bytes_to_read);
2127
2128                         new_irq_handled = false;
2129                 } else {
2130                         break;
2131                 }
2132         }
2133
2134         if (process_count == max_process_count)
2135                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2136 }
2137
2138 static void handle_hpd_rx_irq(void *param)
2139 {
2140         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2141         struct drm_connector *connector = &aconnector->base;
2142         struct drm_device *dev = connector->dev;
2143         struct dc_link *dc_link = aconnector->dc_link;
2144         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2145         enum dc_connection_type new_connection_type = dc_connection_none;
2146 #ifdef CONFIG_DRM_AMD_DC_HDCP
2147         union hpd_irq_data hpd_irq_data;
2148         struct amdgpu_device *adev = dev->dev_private;
2149
2150         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2151 #endif
2152
2153         /*
2154          * TODO: Temporary mutex to protect the HPD interrupt from a GPIO
2155          * conflict; once an i2c helper is implemented, this mutex should be
2156          * retired.
2157          */
2158         if (dc_link->type != dc_connection_mst_branch)
2159                 mutex_lock(&aconnector->hpd_lock);
2160
2161
2162 #ifdef CONFIG_DRM_AMD_DC_HDCP
2163         if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2164 #else
2165         if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2166 #endif
2167                         !is_mst_root_connector) {
2168                 /* Downstream Port status changed. */
2169                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2170                         DRM_ERROR("KMS: Failed to detect connector\n");
2171
2172                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2173                         emulated_link_detect(dc_link);
2174
2175                         if (aconnector->fake_enable)
2176                                 aconnector->fake_enable = false;
2177
2178                         amdgpu_dm_update_connector_after_detect(aconnector);
2179
2180
2181                         drm_modeset_lock_all(dev);
2182                         dm_restore_drm_connector_state(dev, connector);
2183                         drm_modeset_unlock_all(dev);
2184
2185                         drm_kms_helper_hotplug_event(dev);
2186                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2187
2188                         if (aconnector->fake_enable)
2189                                 aconnector->fake_enable = false;
2190
2191                         amdgpu_dm_update_connector_after_detect(aconnector);
2192
2193
2194                         drm_modeset_lock_all(dev);
2195                         dm_restore_drm_connector_state(dev, connector);
2196                         drm_modeset_unlock_all(dev);
2197
2198                         drm_kms_helper_hotplug_event(dev);
2199                 }
2200         }
2201 #ifdef CONFIG_DRM_AMD_DC_HDCP
2202         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2203                 if (adev->dm.hdcp_workqueue)
2204                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
2205         }
2206 #endif
2207         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2208             (dc_link->type == dc_connection_mst_branch))
2209                 dm_handle_hpd_rx_irq(aconnector);
2210
2211         if (dc_link->type != dc_connection_mst_branch) {
2212                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2213                 mutex_unlock(&aconnector->hpd_lock);
2214         }
2215 }
2216
2217 static void register_hpd_handlers(struct amdgpu_device *adev)
2218 {
2219         struct drm_device *dev = adev->ddev;
2220         struct drm_connector *connector;
2221         struct amdgpu_dm_connector *aconnector;
2222         const struct dc_link *dc_link;
2223         struct dc_interrupt_params int_params = {0};
2224
2225         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2226         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2227
2228         list_for_each_entry(connector,
2229                         &dev->mode_config.connector_list, head) {
2230
2231                 aconnector = to_amdgpu_dm_connector(connector);
2232                 dc_link = aconnector->dc_link;
2233
2234                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2235                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2236                         int_params.irq_source = dc_link->irq_source_hpd;
2237
2238                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2239                                         handle_hpd_irq,
2240                                         (void *) aconnector);
2241                 }
2242
2243                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2244
2245                         /* Also register for DP short pulse (hpd_rx). */
2246                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2247                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2248
2249                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2250                                         handle_hpd_rx_irq,
2251                                         (void *) aconnector);
2252                 }
2253         }
2254 }
2255
2256 /* Register IRQ sources and initialize IRQ callbacks */
2257 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2258 {
2259         struct dc *dc = adev->dm.dc;
2260         struct common_irq_params *c_irq_params;
2261         struct dc_interrupt_params int_params = {0};
2262         int r;
2263         int i;
2264         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2265
2266         if (adev->asic_type >= CHIP_VEGA10)
2267                 client_id = SOC15_IH_CLIENTID_DCE;
2268
2269         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2270         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2271
2272         /*
2273          * Actions of amdgpu_irq_add_id():
2274          * 1. Register a set() function with base driver.
2275          *    Base driver will call set() function to enable/disable an
2276          *    interrupt in DC hardware.
2277          * 2. Register amdgpu_dm_irq_handler().
2278          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2279          *    coming from DC hardware.
2280          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2281          *    for acknowledging and handling.
2282          */
2283         /* Use VBLANK interrupt */
2284         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2285                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2286                 if (r) {
2287                         DRM_ERROR("Failed to add crtc irq id!\n");
2288                         return r;
2289                 }
2290
2291                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2292                 int_params.irq_source =
2293                         dc_interrupt_to_irq_source(dc, i, 0);
2294
2295                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2296
2297                 c_irq_params->adev = adev;
2298                 c_irq_params->irq_src = int_params.irq_source;
2299
2300                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2301                                 dm_crtc_high_irq, c_irq_params);
2302         }
2303
2304         /* Use VUPDATE interrupt */
2305         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2306                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2307                 if (r) {
2308                         DRM_ERROR("Failed to add vupdate irq id!\n");
2309                         return r;
2310                 }
2311
2312                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2313                 int_params.irq_source =
2314                         dc_interrupt_to_irq_source(dc, i, 0);
2315
2316                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2317
2318                 c_irq_params->adev = adev;
2319                 c_irq_params->irq_src = int_params.irq_source;
2320
2321                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2322                                 dm_vupdate_high_irq, c_irq_params);
2323         }
2324
2325         /* Use GRPH_PFLIP interrupt */
2326         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2327                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2328                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2329                 if (r) {
2330                         DRM_ERROR("Failed to add page flip irq id!\n");
2331                         return r;
2332                 }
2333
2334                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2335                 int_params.irq_source =
2336                         dc_interrupt_to_irq_source(dc, i, 0);
2337
2338                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2339
2340                 c_irq_params->adev = adev;
2341                 c_irq_params->irq_src = int_params.irq_source;
2342
2343                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2344                                 dm_pflip_high_irq, c_irq_params);
2345
2346         }
2347
2348         /* HPD */
2349         r = amdgpu_irq_add_id(adev, client_id,
2350                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2351         if (r) {
2352                 DRM_ERROR("Failed to add hpd irq id!\n");
2353                 return r;
2354         }
2355
2356         register_hpd_handlers(adev);
2357
2358         return 0;
2359 }
2360
2361 #if defined(CONFIG_DRM_AMD_DC_DCN)
2362 /* Register IRQ sources and initialize IRQ callbacks */
2363 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2364 {
2365         struct dc *dc = adev->dm.dc;
2366         struct common_irq_params *c_irq_params;
2367         struct dc_interrupt_params int_params = {0};
2368         int r;
2369         int i;
2370
2371         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2372         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2373
2374         /*
2375          * Actions of amdgpu_irq_add_id():
2376          * 1. Register a set() function with base driver.
2377          *    Base driver will call set() function to enable/disable an
2378          *    interrupt in DC hardware.
2379          * 2. Register amdgpu_dm_irq_handler().
2380          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2381          *    coming from DC hardware.
2382          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2383          *    for acknowledging and handling.
2384          */
2385
2386         /* Use VSTARTUP interrupt */
2387         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2388                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2389                         i++) {
2390                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2391
2392                 if (r) {
2393                         DRM_ERROR("Failed to add crtc irq id!\n");
2394                         return r;
2395                 }
2396
2397                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2398                 int_params.irq_source =
2399                         dc_interrupt_to_irq_source(dc, i, 0);
2400
2401                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2402
2403                 c_irq_params->adev = adev;
2404                 c_irq_params->irq_src = int_params.irq_source;
2405
2406                 amdgpu_dm_irq_register_interrupt(
2407                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
2408         }
2409
2410         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2411          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2412          * to trigger at end of each vblank, regardless of state of the lock,
2413          * matching DCE behaviour.
2414          */
2415         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2416              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2417              i++) {
2418                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2419
2420                 if (r) {
2421                         DRM_ERROR("Failed to add vupdate irq id!\n");
2422                         return r;
2423                 }
2424
2425                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2426                 int_params.irq_source =
2427                         dc_interrupt_to_irq_source(dc, i, 0);
2428
2429                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2430
2431                 c_irq_params->adev = adev;
2432                 c_irq_params->irq_src = int_params.irq_source;
2433
2434                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2435                                 dm_vupdate_high_irq, c_irq_params);
2436         }
2437
2438         /* Use GRPH_PFLIP interrupt */
2439         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2440                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2441                         i++) {
2442                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2443                 if (r) {
2444                         DRM_ERROR("Failed to add page flip irq id!\n");
2445                         return r;
2446                 }
2447
2448                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2449                 int_params.irq_source =
2450                         dc_interrupt_to_irq_source(dc, i, 0);
2451
2452                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2453
2454                 c_irq_params->adev = adev;
2455                 c_irq_params->irq_src = int_params.irq_source;
2456
2457                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2458                                 dm_pflip_high_irq, c_irq_params);
2459
2460         }
2461
2462         /* HPD */
2463         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2464                         &adev->hpd_irq);
2465         if (r) {
2466                 DRM_ERROR("Failed to add hpd irq id!\n");
2467                 return r;
2468         }
2469
2470         register_hpd_handlers(adev);
2471
2472         return 0;
2473 }
2474 #endif
2475
2476 /*
2477  * Acquires the lock for the atomic state object and returns
2478  * the new atomic state.
2479  *
2480  * This should only be called during atomic check.
2481  */
2482 static int dm_atomic_get_state(struct drm_atomic_state *state,
2483                                struct dm_atomic_state **dm_state)
2484 {
2485         struct drm_device *dev = state->dev;
2486         struct amdgpu_device *adev = dev->dev_private;
2487         struct amdgpu_display_manager *dm = &adev->dm;
2488         struct drm_private_state *priv_state;
2489
2490         if (*dm_state)
2491                 return 0;
2492
2493         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2494         if (IS_ERR(priv_state))
2495                 return PTR_ERR(priv_state);
2496
2497         *dm_state = to_dm_atomic_state(priv_state);
2498
2499         return 0;
2500 }
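/*
 * Illustrative usage from an atomic-check path (a sketch, not a verbatim
 * caller): the pointer must start out NULL, since the early return above
 * treats a non-NULL *dm_state as an already-acquired state.
 *
 *      struct dm_atomic_state *dm_state = NULL;
 *      int ret = dm_atomic_get_state(state, &dm_state);
 *      if (ret)
 *              return ret;
 *      // dm_state->context can now be inspected or modified
 */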
2501
2502 struct dm_atomic_state *
2503 dm_atomic_get_new_state(struct drm_atomic_state *state)
2504 {
2505         struct drm_device *dev = state->dev;
2506         struct amdgpu_device *adev = dev->dev_private;
2507         struct amdgpu_display_manager *dm = &adev->dm;
2508         struct drm_private_obj *obj;
2509         struct drm_private_state *new_obj_state;
2510         int i;
2511
2512         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2513                 if (obj->funcs == dm->atomic_obj.funcs)
2514                         return to_dm_atomic_state(new_obj_state);
2515         }
2516
2517         return NULL;
2518 }
2519
2520 struct dm_atomic_state *
2521 dm_atomic_get_old_state(struct drm_atomic_state *state)
2522 {
2523         struct drm_device *dev = state->dev;
2524         struct amdgpu_device *adev = dev->dev_private;
2525         struct amdgpu_display_manager *dm = &adev->dm;
2526         struct drm_private_obj *obj;
2527         struct drm_private_state *old_obj_state;
2528         int i;
2529
2530         for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
2531                 if (obj->funcs == dm->atomic_obj.funcs)
2532                         return to_dm_atomic_state(old_obj_state);
2533         }
2534
2535         return NULL;
2536 }
2537
2538 static struct drm_private_state *
2539 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2540 {
2541         struct dm_atomic_state *old_state, *new_state;
2542
2543         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2544         if (!new_state)
2545                 return NULL;
2546
2547         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2548
2549         old_state = to_dm_atomic_state(obj->state);
2550
2551         if (old_state && old_state->context)
2552                 new_state->context = dc_copy_state(old_state->context);
2553
2554         if (!new_state->context) {
2555                 kfree(new_state);
2556                 return NULL;
2557         }
2558
2559         return &new_state->base;
2560 }
2561
2562 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2563                                     struct drm_private_state *state)
2564 {
2565         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2566
2567         if (dm_state && dm_state->context)
2568                 dc_release_state(dm_state->context);
2569
2570         kfree(dm_state);
2571 }
2572
2573 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2574         .atomic_duplicate_state = dm_atomic_duplicate_state,
2575         .atomic_destroy_state = dm_atomic_destroy_state,
2576 };
2577
2578 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2579 {
2580         struct dm_atomic_state *state;
2581         int r;
2582
2583         adev->mode_info.mode_config_initialized = true;
2584
2585         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2586         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2587
2588         adev->ddev->mode_config.max_width = 16384;
2589         adev->ddev->mode_config.max_height = 16384;
2590
2591         adev->ddev->mode_config.preferred_depth = 24;
2592         adev->ddev->mode_config.prefer_shadow = 1;
2593         /* indicates support for immediate flip */
2594         adev->ddev->mode_config.async_page_flip = true;
2595
2596         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2597
2598         state = kzalloc(sizeof(*state), GFP_KERNEL);
2599         if (!state)
2600                 return -ENOMEM;
2601
2602         state->context = dc_create_state(adev->dm.dc);
2603         if (!state->context) {
2604                 kfree(state);
2605                 return -ENOMEM;
2606         }
2607
2608         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2609
2610         drm_atomic_private_obj_init(adev->ddev,
2611                                     &adev->dm.atomic_obj,
2612                                     &state->base,
2613                                     &dm_atomic_state_funcs);
2614
2615         r = amdgpu_display_modeset_create_props(adev);
2616         if (r)
2617                 return r;
2618
2619         r = amdgpu_dm_audio_init(adev);
2620         if (r)
2621                 return r;
2622
2623         return 0;
2624 }
2625
2626 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
2627 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
2628 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
2629
2630 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2631         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2632
2633 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
2634 {
2635 #if defined(CONFIG_ACPI)
2636         struct amdgpu_dm_backlight_caps caps;
2637
2638         if (dm->backlight_caps.caps_valid)
2639                 return;
2640
2641         amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
2642         if (caps.caps_valid) {
2643                 dm->backlight_caps.caps_valid = true;
2644                 if (caps.aux_support)
2645                         return;
2646                 dm->backlight_caps.min_input_signal = caps.min_input_signal;
2647                 dm->backlight_caps.max_input_signal = caps.max_input_signal;
2648         } else {
2649                 dm->backlight_caps.min_input_signal =
2650                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2651                 dm->backlight_caps.max_input_signal =
2652                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2653         }
2654 #else
2655         if (dm->backlight_caps.aux_support)
2656                 return;
2657
2658         dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
2659         dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
2660 #endif
2661 }
2662
2663 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
2664 {
2665         bool rc;
2666
2667         if (!link)
2668                 return 1;
2669
2670         rc = dc_link_set_backlight_level_nits(link, true, brightness,
2671                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
2672
2673         return rc ? 0 : 1;
2674 }
2675
2676 static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
2677                               const uint32_t user_brightness)
2678 {
2679         u32 min, max, conversion_pace;
2680         u32 brightness = user_brightness;
2681
2682         if (!caps)
2683                 goto out;
2684
2685         if (!caps->aux_support) {
2686                 max = caps->max_input_signal;
2687                 min = caps->min_input_signal;
2688                 /*
2689                  * The brightness input is in the range 0-255.
2690                  * It needs to be rescaled to be between the
2691                  * requested min and max input signal, and it
2692                  * also needs to be scaled up by 0x101 to
2693                  * match the DC interface, which has a range of
2694                  * 0 to 0xffff.
2695                  */
2696                 conversion_pace = 0x101;
2697                 brightness =
2698                         user_brightness
2699                         * conversion_pace
2700                         * (max - min)
2701                         / AMDGPU_MAX_BL_LEVEL
2702                         + min * conversion_pace;
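                /*
                 * Worked example, using the default caps defined above
                 * (min = 12, max = 255): user_brightness = 255 yields
                 * 255 * 0x101 * 243 / 255 + 12 * 0x101 = 0xFFFF, i.e.
                 * the full DC range.
                 */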
2703         } else {
2704                 /* TODO
2705                  * We are doing a linear interpolation here, which is OK but
2706                  * does not provide the optimal result. We probably want
2707                  * something close to the Perceptual Quantizer (PQ) curve.
2708                  */
2709                 max = caps->aux_max_input_signal;
2710                 min = caps->aux_min_input_signal;
2711
2712                 brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
2713                                + user_brightness * max;
2714                 // Multiply the value by 1000 since we use millinits
2715                 brightness *= 1000;
2716                 brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
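                /*
                 * Worked example (hypothetical aux caps, min = 10 nits,
                 * max = 500 nits): user_brightness = 128 yields
                 * ((255 - 128) * 10 + 128 * 500) * 1000 / 255
                 * = 255961 millinits, roughly 256 nits.
                 */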
2717         }
2718
2719 out:
2720         return brightness;
2721 }
2722
2723 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
2724 {
2725         struct amdgpu_display_manager *dm = bl_get_data(bd);
2726         struct amdgpu_dm_backlight_caps caps;
2727         struct dc_link *link = NULL;
2728         u32 brightness;
2729         bool rc;
2730
2731         amdgpu_dm_update_backlight_caps(dm);
2732         caps = dm->backlight_caps;
2733
2734         link = (struct dc_link *)dm->backlight_link;
2735
2736         brightness = convert_brightness(&caps, bd->props.brightness);
2737         // Change brightness based on AUX property
2738         if (caps.aux_support)
2739                 return set_backlight_via_aux(link, brightness);
2740
2741         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
2742
2743         return rc ? 0 : 1;
2744 }
2745
2746 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
2747 {
2748         struct amdgpu_display_manager *dm = bl_get_data(bd);
2749         int ret = dc_link_get_backlight_level(dm->backlight_link);
2750
2751         if (ret == DC_ERROR_UNEXPECTED)
2752                 return bd->props.brightness;
2753         return ret;
2754 }
2755
2756 static const struct backlight_ops amdgpu_dm_backlight_ops = {
2757         .options = BL_CORE_SUSPENDRESUME,
2758         .get_brightness = amdgpu_dm_backlight_get_brightness,
2759         .update_status  = amdgpu_dm_backlight_update_status,
2760 };
2761
2762 static void
2763 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
2764 {
2765         char bl_name[16];
2766         struct backlight_properties props = { 0 };
2767
2768         amdgpu_dm_update_backlight_caps(dm);
2769
2770         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
2771         props.brightness = AMDGPU_MAX_BL_LEVEL;
2772         props.type = BACKLIGHT_RAW;
2773
2774         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
2775                         dm->adev->ddev->primary->index);
2776
2777         dm->backlight_dev = backlight_device_register(bl_name,
2778                         dm->adev->ddev->dev,
2779                         dm,
2780                         &amdgpu_dm_backlight_ops,
2781                         &props);
2782
2783         if (IS_ERR(dm->backlight_dev))
2784                 DRM_ERROR("DM: Backlight registration failed!\n");
2785         else
2786                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
2787 }
2788
2789 #endif
2790
2791 static int initialize_plane(struct amdgpu_display_manager *dm,
2792                             struct amdgpu_mode_info *mode_info, int plane_id,
2793                             enum drm_plane_type plane_type,
2794                             const struct dc_plane_cap *plane_cap)
2795 {
2796         struct drm_plane *plane;
2797         unsigned long possible_crtcs;
2798         int ret = 0;
2799
2800         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
2801         if (!plane) {
2802                 DRM_ERROR("KMS: Failed to allocate plane\n");
2803                 return -ENOMEM;
2804         }
2805         plane->type = plane_type;
2806
2807         /*
2808          * HACK: IGT tests expect that the primary plane for a CRTC
2809          * can only have one possible CRTC. Only expose support for
2810          * any CRTC if they're not going to be used as a primary plane
2811          * for a CRTC - like overlay or underlay planes.
2812          */
2813         possible_crtcs = 1 << plane_id;
2814         if (plane_id >= dm->dc->caps.max_streams)
2815                 possible_crtcs = 0xff;
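        /*
         * For illustration: with 4 streams, primary plane 1 gets
         * possible_crtcs = 0x2 (CRTC 1 only), while an overlay plane
         * (plane_id >= max_streams) gets 0xff (any of up to 8 CRTCs).
         */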
2816
2817         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
2818
2819         if (ret) {
2820                 DRM_ERROR("KMS: Failed to initialize plane\n");
2821                 kfree(plane);
2822                 return ret;
2823         }
2824
2825         if (mode_info)
2826                 mode_info->planes[plane_id] = plane;
2827
2828         return ret;
2829 }
2830
2831
2832 static void register_backlight_device(struct amdgpu_display_manager *dm,
2833                                       struct dc_link *link)
2834 {
2835 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
2836         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
2837
2838         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
2839             link->type != dc_connection_none) {
2840                 /*
2841                  * Even if registration failed, we should continue with
2842                  * DM initialization because not having a backlight control
2843                  * is better than a black screen.
2844                  */
2845                 amdgpu_dm_register_backlight_device(dm);
2846
2847                 if (dm->backlight_dev)
2848                         dm->backlight_link = link;
2849         }
2850 #endif
2851 }
2852
2853
2854 /*
2855  * In this architecture, the association
2856  * connector -> encoder -> crtc
2857  * is not really required. The crtc and connector will hold the
2858  * display_index as an abstraction to use with the DAL component.
2859  *
2860  * Returns 0 on success
2861  */
2862 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
2863 {
2864         struct amdgpu_display_manager *dm = &adev->dm;
2865         int32_t i;
2866         struct amdgpu_dm_connector *aconnector = NULL;
2867         struct amdgpu_encoder *aencoder = NULL;
2868         struct amdgpu_mode_info *mode_info = &adev->mode_info;
2869         uint32_t link_cnt;
2870         int32_t primary_planes;
2871         enum dc_connection_type new_connection_type = dc_connection_none;
2872         const struct dc_plane_cap *plane;
2873
2874         link_cnt = dm->dc->caps.max_links;
2875         if (amdgpu_dm_mode_config_init(dm->adev)) {
2876                 DRM_ERROR("DM: Failed to initialize mode config\n");
2877                 return -EINVAL;
2878         }
2879
2880         /* There is one primary plane per CRTC */
2881         primary_planes = dm->dc->caps.max_streams;
2882         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
2883
2884         /*
2885          * Initialize primary planes, implicit planes for legacy IOCTLs.
2886          * Order is reversed to match iteration order in atomic check.
2887          */
2888         for (i = (primary_planes - 1); i >= 0; i--) {
2889                 plane = &dm->dc->caps.planes[i];
2890
2891                 if (initialize_plane(dm, mode_info, i,
2892                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
2893                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
2894                         goto fail;
2895                 }
2896         }
2897
2898         /*
2899          * Initialize overlay planes, index starting after primary planes.
2900          * These planes have a higher DRM index than the primary planes since
2901          * they should be considered as having a higher z-order.
2902          * Order is reversed to match iteration order in atomic check.
2903          *
2904          * Only support DCN for now, and only expose one so we don't encourage
2905          * userspace to use up all the pipes.
2906          */
2907         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
2908                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
2909
2910                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
2911                         continue;
2912
2913                 if (!plane->blends_with_above || !plane->blends_with_below)
2914                         continue;
2915
2916                 if (!plane->pixel_format_support.argb8888)
2917                         continue;
2918
2919                 if (initialize_plane(dm, NULL, primary_planes + i,
2920                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
2921                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
2922                         goto fail;
2923                 }
2924
2925                 /* Only create one overlay plane. */
2926                 break;
2927         }
2928
2929         for (i = 0; i < dm->dc->caps.max_streams; i++)
2930                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
2931                         DRM_ERROR("KMS: Failed to initialize crtc\n");
2932                         goto fail;
2933                 }
2934
2935         dm->display_indexes_num = dm->dc->caps.max_streams;
2936
2937         /* Loop over all connectors on the board */
2938         for (i = 0; i < link_cnt; i++) {
2939                 struct dc_link *link = NULL;
2940
2941                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
2942                         DRM_ERROR(
2943                                 "KMS: Cannot support more than %d display indexes\n",
2944                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
2945                         continue;
2946                 }
2947
2948                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
2949                 if (!aconnector)
2950                         goto fail;
2951
2952                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
2953                 if (!aencoder)
2954                         goto fail;
2955
2956                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
2957                         DRM_ERROR("KMS: Failed to initialize encoder\n");
2958                         goto fail;
2959                 }
2960
2961                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
2962                         DRM_ERROR("KMS: Failed to initialize connector\n");
2963                         goto fail;
2964                 }
2965
2966                 link = dc_get_link_at_index(dm->dc, i);
2967
2968                 if (!dc_link_detect_sink(link, &new_connection_type))
2969                         DRM_ERROR("KMS: Failed to detect connector\n");
2970
2971                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2972                         emulated_link_detect(link);
2973                         amdgpu_dm_update_connector_after_detect(aconnector);
2974
2975                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
2976                         amdgpu_dm_update_connector_after_detect(aconnector);
2977                         register_backlight_device(dm, link);
2978                         if (amdgpu_dc_feature_mask & DC_PSR_MASK)
2979                                 amdgpu_dm_set_psr_caps(link);
2980                 }
2981
2982
2983         }
2984
2985         /* Software is initialized. Now we can register interrupt handlers. */
2986         switch (adev->asic_type) {
2987         case CHIP_BONAIRE:
2988         case CHIP_HAWAII:
2989         case CHIP_KAVERI:
2990         case CHIP_KABINI:
2991         case CHIP_MULLINS:
2992         case CHIP_TONGA:
2993         case CHIP_FIJI:
2994         case CHIP_CARRIZO:
2995         case CHIP_STONEY:
2996         case CHIP_POLARIS11:
2997         case CHIP_POLARIS10:
2998         case CHIP_POLARIS12:
2999         case CHIP_VEGAM:
3000         case CHIP_VEGA10:
3001         case CHIP_VEGA12:
3002         case CHIP_VEGA20:
3003                 if (dce110_register_irq_handlers(dm->adev)) {
3004                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3005                         goto fail;
3006                 }
3007                 break;
3008 #if defined(CONFIG_DRM_AMD_DC_DCN)
3009         case CHIP_RAVEN:
3010         case CHIP_NAVI12:
3011         case CHIP_NAVI10:
3012         case CHIP_NAVI14:
3013         case CHIP_RENOIR:
3014                 if (dcn10_register_irq_handlers(dm->adev)) {
3015                         DRM_ERROR("DM: Failed to initialize IRQ\n");
3016                         goto fail;
3017                 }
3018                 break;
3019 #endif
3020         default:
3021                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3022                 goto fail;
3023         }
3024
3025         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
3026                 dm->dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
3027
3028         /* No userspace support. */
3029         dm->dc->debug.disable_tri_buf = true;
3030
3031         return 0;
3032 fail:
3033         kfree(aencoder);
3034         kfree(aconnector);
3035
3036         return -EINVAL;
3037 }
3038
3039 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3040 {
3041         drm_mode_config_cleanup(dm->ddev);
3042         drm_atomic_private_obj_fini(&dm->atomic_obj);
3044 }
3045
3046 /******************************************************************************
3047  * amdgpu_display_funcs functions
3048  *****************************************************************************/
3049
3050 /*
3051  * dm_bandwidth_update - program display watermarks
3052  *
3053  * @adev: amdgpu_device pointer
3054  *
3055  * Calculate and program the display watermarks and line buffer allocation.
3056  */
3057 static void dm_bandwidth_update(struct amdgpu_device *adev)
3058 {
3059         /* TODO: implement later */
3060 }
3061
3062 static const struct amdgpu_display_funcs dm_display_funcs = {
3063         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3064         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
3065         .backlight_set_level = NULL, /* never called for DC */
3066         .backlight_get_level = NULL, /* never called for DC */
3067         .hpd_sense = NULL, /* called unconditionally */
3068         .hpd_set_polarity = NULL, /* called unconditionally */
3069         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3070         .page_flip_get_scanoutpos =
3071                 dm_crtc_get_scanoutpos, /* called unconditionally */
3072         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3073         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3074 };
3075
3076 #if defined(CONFIG_DEBUG_KERNEL_DC)
3077
3078 static ssize_t s3_debug_store(struct device *device,
3079                               struct device_attribute *attr,
3080                               const char *buf,
3081                               size_t count)
3082 {
3083         int ret;
3084         int s3_state;
3085         struct drm_device *drm_dev = dev_get_drvdata(device);
3086         struct amdgpu_device *adev = drm_dev->dev_private;
3087
3088         ret = kstrtoint(buf, 0, &s3_state);
3089
3090         if (ret == 0) {
3091                 if (s3_state) {
3092                         dm_resume(adev);
3093                         drm_kms_helper_hotplug_event(adev->ddev);
3094                 } else
3095                         dm_suspend(adev);
3096         }
3097
3098         return ret == 0 ? count : 0;
3099 }
3100
3101 DEVICE_ATTR_WO(s3_debug);
3102
3103 #endif
3104
3105 static int dm_early_init(void *handle)
3106 {
3107         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3108
3109         switch (adev->asic_type) {
3110         case CHIP_BONAIRE:
3111         case CHIP_HAWAII:
3112                 adev->mode_info.num_crtc = 6;
3113                 adev->mode_info.num_hpd = 6;
3114                 adev->mode_info.num_dig = 6;
3115                 break;
3116         case CHIP_KAVERI:
3117                 adev->mode_info.num_crtc = 4;
3118                 adev->mode_info.num_hpd = 6;
3119                 adev->mode_info.num_dig = 7;
3120                 break;
3121         case CHIP_KABINI:
3122         case CHIP_MULLINS:
3123                 adev->mode_info.num_crtc = 2;
3124                 adev->mode_info.num_hpd = 6;
3125                 adev->mode_info.num_dig = 6;
3126                 break;
3127         case CHIP_FIJI:
3128         case CHIP_TONGA:
3129                 adev->mode_info.num_crtc = 6;
3130                 adev->mode_info.num_hpd = 6;
3131                 adev->mode_info.num_dig = 7;
3132                 break;
3133         case CHIP_CARRIZO:
3134                 adev->mode_info.num_crtc = 3;
3135                 adev->mode_info.num_hpd = 6;
3136                 adev->mode_info.num_dig = 9;
3137                 break;
3138         case CHIP_STONEY:
3139                 adev->mode_info.num_crtc = 2;
3140                 adev->mode_info.num_hpd = 6;
3141                 adev->mode_info.num_dig = 9;
3142                 break;
3143         case CHIP_POLARIS11:
3144         case CHIP_POLARIS12:
3145                 adev->mode_info.num_crtc = 5;
3146                 adev->mode_info.num_hpd = 5;
3147                 adev->mode_info.num_dig = 5;
3148                 break;
3149         case CHIP_POLARIS10:
3150         case CHIP_VEGAM:
3151                 adev->mode_info.num_crtc = 6;
3152                 adev->mode_info.num_hpd = 6;
3153                 adev->mode_info.num_dig = 6;
3154                 break;
3155         case CHIP_VEGA10:
3156         case CHIP_VEGA12:
3157         case CHIP_VEGA20:
3158                 adev->mode_info.num_crtc = 6;
3159                 adev->mode_info.num_hpd = 6;
3160                 adev->mode_info.num_dig = 6;
3161                 break;
3162 #if defined(CONFIG_DRM_AMD_DC_DCN)
3163         case CHIP_RAVEN:
3164                 adev->mode_info.num_crtc = 4;
3165                 adev->mode_info.num_hpd = 4;
3166                 adev->mode_info.num_dig = 4;
3167                 break;
3168 #endif
3169         case CHIP_NAVI10:
3170         case CHIP_NAVI12:
3171                 adev->mode_info.num_crtc = 6;
3172                 adev->mode_info.num_hpd = 6;
3173                 adev->mode_info.num_dig = 6;
3174                 break;
3175         case CHIP_NAVI14:
3176                 adev->mode_info.num_crtc = 5;
3177                 adev->mode_info.num_hpd = 5;
3178                 adev->mode_info.num_dig = 5;
3179                 break;
3180         case CHIP_RENOIR:
3181                 adev->mode_info.num_crtc = 4;
3182                 adev->mode_info.num_hpd = 4;
3183                 adev->mode_info.num_dig = 4;
3184                 break;
3185         default:
3186                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3187                 return -EINVAL;
3188         }
3189
3190         amdgpu_dm_set_irq_funcs(adev);
3191
3192         if (adev->mode_info.funcs == NULL)
3193                 adev->mode_info.funcs = &dm_display_funcs;
3194
3195         /*
3196          * Note: Do NOT change adev->audio_endpt_rreg and
3197          * adev->audio_endpt_wreg because they are initialised in
3198          * amdgpu_device_init()
3199          */
3200 #if defined(CONFIG_DEBUG_KERNEL_DC)
3201         device_create_file(
3202                 adev->ddev->dev,
3203                 &dev_attr_s3_debug);
3204 #endif
3205
3206         return 0;
3207 }
3208
3209 static bool modeset_required(struct drm_crtc_state *crtc_state,
3210                              struct dc_stream_state *new_stream,
3211                              struct dc_stream_state *old_stream)
3212 {
3213         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3214                 return false;
3215
3216         if (!crtc_state->enable)
3217                 return false;
3218
3219         return crtc_state->active;
3220 }
3221
3222 static bool modereset_required(struct drm_crtc_state *crtc_state)
3223 {
3224         if (!drm_atomic_crtc_needs_modeset(crtc_state))
3225                 return false;
3226
3227         return !crtc_state->enable || !crtc_state->active;
3228 }
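/*
 * For example, a commit that disables a previously active CRTC (DPMS off)
 * has drm_atomic_crtc_needs_modeset() true but crtc_state->active false,
 * so modeset_required() returns false while modereset_required() returns
 * true; a plain plane update on a running CRTC needs neither.
 */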
3229
3230 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3231 {
3232         drm_encoder_cleanup(encoder);
3233         kfree(encoder);
3234 }
3235
3236 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3237         .destroy = amdgpu_dm_encoder_destroy,
3238 };
3239
3240
3241 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3242                                 struct dc_scaling_info *scaling_info)
3243 {
3244         int scale_w, scale_h;
3245
3246         memset(scaling_info, 0, sizeof(*scaling_info));
3247
3248         /* Source is in 16.16 fixed point; ignore the fractional part for now. */
3249         scaling_info->src_rect.x = state->src_x >> 16;
3250         scaling_info->src_rect.y = state->src_y >> 16;
3251
3252         scaling_info->src_rect.width = state->src_w >> 16;
3253         if (scaling_info->src_rect.width == 0)
3254                 return -EINVAL;
3255
3256         scaling_info->src_rect.height = state->src_h >> 16;
3257         if (scaling_info->src_rect.height == 0)
3258                 return -EINVAL;
3259
3260         scaling_info->dst_rect.x = state->crtc_x;
3261         scaling_info->dst_rect.y = state->crtc_y;
3262
3263         if (state->crtc_w == 0)
3264                 return -EINVAL;
3265
3266         scaling_info->dst_rect.width = state->crtc_w;
3267
3268         if (state->crtc_h == 0)
3269                 return -EINVAL;
3270
3271         scaling_info->dst_rect.height = state->crtc_h;
3272
3273         /* DRM doesn't specify clipping on destination output. */
3274         scaling_info->clip_rect = scaling_info->dst_rect;
3275
3276         /* TODO: Validate scaling per-format with DC plane caps */
3277         scale_w = scaling_info->dst_rect.width * 1000 /
3278                   scaling_info->src_rect.width;
3279
3280         if (scale_w < 250 || scale_w > 16000)
3281                 return -EINVAL;
3282
3283         scale_h = scaling_info->dst_rect.height * 1000 /
3284                   scaling_info->src_rect.height;
3285
3286         if (scale_h < 250 || scale_h > 16000)
3287                 return -EINVAL;
3288
3289         /*
3290          * The "scaling_quality" can be ignored for now; quality = 0 makes DC
3291          * assume reasonable defaults based on the format.
3292          */
3293
3294         return 0;
3295 }
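/*
 * Worked example: a 1920x1080 source (src_w == 1920 << 16 in 16.16 fixed
 * point) scanned out to a 960x540 destination gives
 * scale_w = 960 * 1000 / 1920 = 500, i.e. a 0.5x ratio. Ratios are
 * accepted between 250 and 16000 (0.25x to 16x, in units of 1/1000).
 */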
3296
3297 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
3298                        uint64_t *tiling_flags, bool *tmz_surface)
3299 {
3300         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
3301         int r = amdgpu_bo_reserve(rbo, false);
3302
3303         if (unlikely(r)) {
3304                 /* Don't show error message when returning -ERESTARTSYS */
3305                 if (r != -ERESTARTSYS)
3306                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
3307                 return r;
3308         }
3309
3310         if (tiling_flags)
3311                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
3312
3313         if (tmz_surface)
3314                 *tmz_surface = amdgpu_bo_encrypted(rbo);
3315
3316         amdgpu_bo_unreserve(rbo);
3317
3318         return r;
3319 }
3320
3321 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
3322 {
3323         uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
3324
3325         return offset ? (address + offset * 256) : 0;
3326 }
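/*
 * DCC_OFFSET_256B is stored in units of 256 bytes. For example, an offset
 * field of 16 places the DCC metadata at address + 16 * 256 =
 * address + 4096; an offset of 0 means the buffer carries no DCC metadata
 * and 0 is returned.
 */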
3327
3328 static int
3329 fill_plane_dcc_attributes(struct amdgpu_device *adev,
3330                           const struct amdgpu_framebuffer *afb,
3331                           const enum surface_pixel_format format,
3332                           const enum dc_rotation_angle rotation,
3333                           const struct plane_size *plane_size,
3334                           const union dc_tiling_info *tiling_info,
3335                           const uint64_t info,
3336                           struct dc_plane_dcc_param *dcc,
3337                           struct dc_plane_address *address,
3338                           bool force_disable_dcc)
3339 {
3340         struct dc *dc = adev->dm.dc;
3341         struct dc_dcc_surface_param input;
3342         struct dc_surface_dcc_cap output;
3343         uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
3344         uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
3345         uint64_t dcc_address;
3346
3347         memset(&input, 0, sizeof(input));
3348         memset(&output, 0, sizeof(output));
3349
3350         if (force_disable_dcc)
3351                 return 0;
3352
3353         if (!offset)
3354                 return 0;
3355
3356         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3357                 return 0;
3358
3359         if (!dc->cap_funcs.get_dcc_compression_cap)
3360                 return -EINVAL;
3361
3362         input.format = format;
3363         input.surface_size.width = plane_size->surface_size.width;
3364         input.surface_size.height = plane_size->surface_size.height;
3365         input.swizzle_mode = tiling_info->gfx9.swizzle;
3366
3367         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3368                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3369         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3370                 input.scan = SCAN_DIRECTION_VERTICAL;
3371
3372         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3373                 return -EINVAL;
3374
3375         if (!output.capable)
3376                 return -EINVAL;
3377
3378         if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
3379                 return -EINVAL;
3380
3381         dcc->enable = 1;
3382         dcc->meta_pitch =
3383                 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
3384         dcc->independent_64b_blks = i64b;
3385
3386         dcc_address = get_dcc_address(afb->address, info);
3387         address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
3388         address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
3389
3390         return 0;
3391 }
3392
3393 static int
3394 fill_plane_buffer_attributes(struct amdgpu_device *adev,
3395                              const struct amdgpu_framebuffer *afb,
3396                              const enum surface_pixel_format format,
3397                              const enum dc_rotation_angle rotation,
3398                              const uint64_t tiling_flags,
3399                              union dc_tiling_info *tiling_info,
3400                              struct plane_size *plane_size,
3401                              struct dc_plane_dcc_param *dcc,
3402                              struct dc_plane_address *address,
3403                              bool tmz_surface,
3404                              bool force_disable_dcc)
3405 {
3406         const struct drm_framebuffer *fb = &afb->base;
3407         int ret;
3408
3409         memset(tiling_info, 0, sizeof(*tiling_info));
3410         memset(plane_size, 0, sizeof(*plane_size));
3411         memset(dcc, 0, sizeof(*dcc));
3412         memset(address, 0, sizeof(*address));
3413
3414         address->tmz_surface = tmz_surface;
3415
3416         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
3417                 plane_size->surface_size.x = 0;
3418                 plane_size->surface_size.y = 0;
3419                 plane_size->surface_size.width = fb->width;
3420                 plane_size->surface_size.height = fb->height;
3421                 plane_size->surface_pitch =
3422                         fb->pitches[0] / fb->format->cpp[0];
3423
3424                 address->type = PLN_ADDR_TYPE_GRAPHICS;
3425                 address->grph.addr.low_part = lower_32_bits(afb->address);
3426                 address->grph.addr.high_part = upper_32_bits(afb->address);
3427         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
3428                 uint64_t chroma_addr = afb->address + fb->offsets[1];
3429
3430                 plane_size->surface_size.x = 0;
3431                 plane_size->surface_size.y = 0;
3432                 plane_size->surface_size.width = fb->width;
3433                 plane_size->surface_size.height = fb->height;
3434                 plane_size->surface_pitch =
3435                         fb->pitches[0] / fb->format->cpp[0];
3436
3437                 plane_size->chroma_size.x = 0;
3438                 plane_size->chroma_size.y = 0;
3439                 /* TODO: set these based on surface format */
3440                 plane_size->chroma_size.width = fb->width / 2;
3441                 plane_size->chroma_size.height = fb->height / 2;
3442
3443                 plane_size->chroma_pitch =
3444                         fb->pitches[1] / fb->format->cpp[1];
3445
3446                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
3447                 address->video_progressive.luma_addr.low_part =
3448                         lower_32_bits(afb->address);
3449                 address->video_progressive.luma_addr.high_part =
3450                         upper_32_bits(afb->address);
3451                 address->video_progressive.chroma_addr.low_part =
3452                         lower_32_bits(chroma_addr);
3453                 address->video_progressive.chroma_addr.high_part =
3454                         upper_32_bits(chroma_addr);
3455         }
3456
3457         /* Fill GFX8 params */
3458         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3459                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3460
3461                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3462                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3463                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3464                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3465                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3466
3467                 /* XXX fix me for VI */
3468                 tiling_info->gfx8.num_banks = num_banks;
3469                 tiling_info->gfx8.array_mode =
3470                                 DC_ARRAY_2D_TILED_THIN1;
3471                 tiling_info->gfx8.tile_split = tile_split;
3472                 tiling_info->gfx8.bank_width = bankw;
3473                 tiling_info->gfx8.bank_height = bankh;
3474                 tiling_info->gfx8.tile_aspect = mtaspect;
3475                 tiling_info->gfx8.tile_mode =
3476                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3477         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3478                         == DC_ARRAY_1D_TILED_THIN1) {
3479                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3480         }
3481
3482         tiling_info->gfx8.pipe_config =
3483                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3484
3485         if (adev->asic_type == CHIP_VEGA10 ||
3486             adev->asic_type == CHIP_VEGA12 ||
3487             adev->asic_type == CHIP_VEGA20 ||
3488             adev->asic_type == CHIP_NAVI10 ||
3489             adev->asic_type == CHIP_NAVI14 ||
3490             adev->asic_type == CHIP_NAVI12 ||
3491             adev->asic_type == CHIP_RENOIR ||
3492             adev->asic_type == CHIP_RAVEN) {
3493                 /* Fill GFX9 params */
3494                 tiling_info->gfx9.num_pipes =
3495                         adev->gfx.config.gb_addr_config_fields.num_pipes;
3496                 tiling_info->gfx9.num_banks =
3497                         adev->gfx.config.gb_addr_config_fields.num_banks;
3498                 tiling_info->gfx9.pipe_interleave =
3499                         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3500                 tiling_info->gfx9.num_shader_engines =
3501                         adev->gfx.config.gb_addr_config_fields.num_se;
3502                 tiling_info->gfx9.max_compressed_frags =
3503                         adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3504                 tiling_info->gfx9.num_rb_per_se =
3505                         adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3506                 tiling_info->gfx9.swizzle =
3507                         AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3508                 tiling_info->gfx9.shaderEnable = 1;
3509
3510                 ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
3511                                                 plane_size, tiling_info,
3512                                                 tiling_flags, dcc, address,
3513                                                 force_disable_dcc);
3514                 if (ret)
3515                         return ret;
3516         }
3517
3518         return 0;
3519 }
3520
3521 static void
3522 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
3523                                bool *per_pixel_alpha, bool *global_alpha,
3524                                int *global_alpha_value)
3525 {
3526         *per_pixel_alpha = false;
3527         *global_alpha = false;
3528         *global_alpha_value = 0xff;
3529
3530         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
3531                 return;
3532
3533         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
3534                 static const uint32_t alpha_formats[] = {
3535                         DRM_FORMAT_ARGB8888,
3536                         DRM_FORMAT_RGBA8888,
3537                         DRM_FORMAT_ABGR8888,
3538                 };
3539                 uint32_t format = plane_state->fb->format->format;
3540                 unsigned int i;
3541
3542                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
3543                         if (format == alpha_formats[i]) {
3544                                 *per_pixel_alpha = true;
3545                                 break;
3546                         }
3547                 }
3548         }
3549
3550         if (plane_state->alpha < 0xffff) {
3551                 *global_alpha = true;
3552                 *global_alpha_value = plane_state->alpha >> 8;
3553         }
3554 }
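/*
 * For example, an overlay plane with an ARGB8888 framebuffer, pixel blend
 * mode DRM_MODE_BLEND_PREMULTI and a plane alpha of 0x8000 ends up with
 * per_pixel_alpha = true, global_alpha = true and
 * global_alpha_value = 0x8000 >> 8 = 0x80, i.e. roughly 50% opacity on
 * top of the per-pixel alpha channel.
 */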
3555
3556 static int
3557 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
3558                             const enum surface_pixel_format format,
3559                             enum dc_color_space *color_space)
3560 {
3561         bool full_range;
3562
3563         *color_space = COLOR_SPACE_SRGB;
3564
3565         /* DRM color properties only affect non-RGB formats. */
3566         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
3567                 return 0;
3568
3569         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
3570
3571         switch (plane_state->color_encoding) {
3572         case DRM_COLOR_YCBCR_BT601:
3573                 if (full_range)
3574                         *color_space = COLOR_SPACE_YCBCR601;
3575                 else
3576                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
3577                 break;
3578
3579         case DRM_COLOR_YCBCR_BT709:
3580                 if (full_range)
3581                         *color_space = COLOR_SPACE_YCBCR709;
3582                 else
3583                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
3584                 break;
3585
3586         case DRM_COLOR_YCBCR_BT2020:
3587                 if (full_range)
3588                         *color_space = COLOR_SPACE_2020_YCBCR;
3589                 else
3590                         return -EINVAL;
3591                 break;
3592
3593         default:
3594                 return -EINVAL;
3595         }
3596
3597         return 0;
3598 }
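/*
 * For example, an NV12 plane with DRM_COLOR_YCBCR_BT709 encoding and
 * DRM_COLOR_YCBCR_LIMITED_RANGE maps to COLOR_SPACE_YCBCR709_LIMITED,
 * while any RGB format short-circuits to COLOR_SPACE_SRGB regardless of
 * the plane's color properties.
 */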
3599
3600 static int
3601 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
3602                             const struct drm_plane_state *plane_state,
3603                             const uint64_t tiling_flags,
3604                             struct dc_plane_info *plane_info,
3605                             struct dc_plane_address *address,
3606                             bool tmz_surface,
3607                             bool force_disable_dcc)
3608 {
3609         const struct drm_framebuffer *fb = plane_state->fb;
3610         const struct amdgpu_framebuffer *afb =
3611                 to_amdgpu_framebuffer(plane_state->fb);
3612         struct drm_format_name_buf format_name;
3613         int ret;
3614
3615         memset(plane_info, 0, sizeof(*plane_info));
3616
3617         switch (fb->format->format) {
3618         case DRM_FORMAT_C8:
3619                 plane_info->format =
3620                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
3621                 break;
3622         case DRM_FORMAT_RGB565:
3623                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
3624                 break;
3625         case DRM_FORMAT_XRGB8888:
3626         case DRM_FORMAT_ARGB8888:
3627                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
3628                 break;
3629         case DRM_FORMAT_XRGB2101010:
3630         case DRM_FORMAT_ARGB2101010:
3631                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
3632                 break;
3633         case DRM_FORMAT_XBGR2101010:
3634         case DRM_FORMAT_ABGR2101010:
3635                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
3636                 break;
3637         case DRM_FORMAT_XBGR8888:
3638         case DRM_FORMAT_ABGR8888:
3639                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
3640                 break;
3641         case DRM_FORMAT_NV21:
3642                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
3643                 break;
3644         case DRM_FORMAT_NV12:
3645                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
3646                 break;
3647         case DRM_FORMAT_P010:
3648                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
3649                 break;
3650         case DRM_FORMAT_XRGB16161616F:
3651         case DRM_FORMAT_ARGB16161616F:
3652                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
3653                 break;
3654         default:
3655                 DRM_ERROR(
3656                         "Unsupported screen format %s\n",
3657                         drm_get_format_name(fb->format->format, &format_name));
3658                 return -EINVAL;
3659         }
3660
3661         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
3662         case DRM_MODE_ROTATE_0:
3663                 plane_info->rotation = ROTATION_ANGLE_0;
3664                 break;
3665         case DRM_MODE_ROTATE_90:
3666                 plane_info->rotation = ROTATION_ANGLE_90;
3667                 break;
3668         case DRM_MODE_ROTATE_180:
3669                 plane_info->rotation = ROTATION_ANGLE_180;
3670                 break;
3671         case DRM_MODE_ROTATE_270:
3672                 plane_info->rotation = ROTATION_ANGLE_270;
3673                 break;
3674         default:
3675                 plane_info->rotation = ROTATION_ANGLE_0;
3676                 break;
3677         }
3678
3679         plane_info->visible = true;
3680         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
3681
3682         plane_info->layer_index = 0;
3683
3684         ret = fill_plane_color_attributes(plane_state, plane_info->format,
3685                                           &plane_info->color_space);
3686         if (ret)
3687                 return ret;
3688
3689         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
3690                                            plane_info->rotation, tiling_flags,
3691                                            &plane_info->tiling_info,
3692                                            &plane_info->plane_size,
3693                                            &plane_info->dcc, address, tmz_surface,
3694                                            force_disable_dcc);
3695         if (ret)
3696                 return ret;
3697
3698         fill_blending_from_plane_state(
3699                 plane_state, &plane_info->per_pixel_alpha,
3700                 &plane_info->global_alpha, &plane_info->global_alpha_value);
3701
3702         return 0;
3703 }
3704
3705 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
3706                                     struct dc_plane_state *dc_plane_state,
3707                                     struct drm_plane_state *plane_state,
3708                                     struct drm_crtc_state *crtc_state)
3709 {
3710         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
3711         const struct amdgpu_framebuffer *amdgpu_fb =
3712                 to_amdgpu_framebuffer(plane_state->fb);
3713         struct dc_scaling_info scaling_info;
3714         struct dc_plane_info plane_info;
3715         uint64_t tiling_flags;
3716         int ret;
3717         bool tmz_surface = false;
3718         bool force_disable_dcc = false;
3719
3720         ret = fill_dc_scaling_info(plane_state, &scaling_info);
3721         if (ret)
3722                 return ret;
3723
3724         dc_plane_state->src_rect = scaling_info.src_rect;
3725         dc_plane_state->dst_rect = scaling_info.dst_rect;
3726         dc_plane_state->clip_rect = scaling_info.clip_rect;
3727         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
3728
3729         ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
3730         if (ret)
3731                 return ret;
3732
3733         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
3734         ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
3735                                           &plane_info,
3736                                           &dc_plane_state->address,
3737                                           tmz_surface,
3738                                           force_disable_dcc);
3739         if (ret)
3740                 return ret;
3741
3742         dc_plane_state->format = plane_info.format;
3743         dc_plane_state->color_space = plane_info.color_space;
3745         dc_plane_state->plane_size = plane_info.plane_size;
3746         dc_plane_state->rotation = plane_info.rotation;
3747         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
3748         dc_plane_state->stereo_format = plane_info.stereo_format;
3749         dc_plane_state->tiling_info = plane_info.tiling_info;
3750         dc_plane_state->visible = plane_info.visible;
3751         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
3752         dc_plane_state->global_alpha = plane_info.global_alpha;
3753         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
3754         dc_plane_state->dcc = plane_info.dcc;
3755         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
3756
3757         /*
3758          * Always set input transfer function, since plane state is refreshed
3759          * every time.
3760          */
3761         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
3762         if (ret)
3763                 return ret;
3764
3765         return 0;
3766 }
3767
3768 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
3769                                            const struct dm_connector_state *dm_state,
3770                                            struct dc_stream_state *stream)
3771 {
3772         enum amdgpu_rmx_type rmx_type;
3773
3774         struct rect src = { 0 }; /* viewport in composition space */
3775         struct rect dst = { 0 }; /* stream addressable area */
3776
3777         /* No mode; nothing to be done */
3778         if (!mode)
3779                 return;
3780
3781         /* Full screen scaling by default */
3782         src.width = mode->hdisplay;
3783         src.height = mode->vdisplay;
3784         dst.width = stream->timing.h_addressable;
3785         dst.height = stream->timing.v_addressable;
3786
3787         if (dm_state) {
3788                 rmx_type = dm_state->scaling;
3789                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
3790                         if (src.width * dst.height <
3791                                         src.height * dst.width) {
3792                                 /* height needs less upscaling/more downscaling */
3793                                 dst.width = src.width *
3794                                                 dst.height / src.height;
3795                         } else {
3796                                 /* width needs less upscaling/more downscaling */
3797                                 dst.height = src.height *
3798                                                 dst.width / src.width;
3799                         }
3800                 } else if (rmx_type == RMX_CENTER) {
3801                         dst = src;
3802                 }
3803
3804                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
3805                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
3806
3807                 if (dm_state->underscan_enable) {
3808                         dst.x += dm_state->underscan_hborder / 2;
3809                         dst.y += dm_state->underscan_vborder / 2;
3810                         dst.width -= dm_state->underscan_hborder;
3811                         dst.height -= dm_state->underscan_vborder;
3812                 }
3813         }
3814
3815         stream->src = src;
3816         stream->dst = dst;
3817
3818         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
3819                         dst.x, dst.y, dst.width, dst.height);
3820
3821 }
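/*
 * Worked example for RMX_ASPECT: a 1280x720 source on a stream with
 * 1920x1200 addressable timing compares 1280 * 1200 (1536000) against
 * 720 * 1920 (1382400); width needs less upscaling, so
 * dst.height = 720 * 1920 / 1280 = 1080 and the image is centered with
 * dst.y = (1200 - 1080) / 2 = 60.
 */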
3822
3823 static enum dc_color_depth
3824 convert_color_depth_from_display_info(const struct drm_connector *connector,
3825                                       const struct drm_connector_state *state,
3826                                       bool is_y420)
3827 {
3828         uint8_t bpc;
3829
3830         if (is_y420) {
3831                 bpc = 8;
3832
3833                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
3834                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
3835                         bpc = 16;
3836                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
3837                         bpc = 12;
3838                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
3839                         bpc = 10;
3840         } else {
3841                 bpc = (uint8_t)connector->display_info.bpc;
3842                 /* Assume 8 bpc by default if no bpc is specified. */
3843                 bpc = bpc ? bpc : 8;
3844         }
3845
3846         if (!state)
3847                 state = connector->state;
3848
3849         if (state) {
3850                 /*
3851                  * Cap display bpc based on the user requested value.
3852                  *
3853                  * The value for state->max_bpc may not be correctly updated
3854                  * depending on when the connector gets added to the state
3855                  * or if this was called outside of atomic check, so it
3856                  * can't be used directly.
3857                  */
3858                 bpc = min(bpc, state->max_requested_bpc);
3859
3860                 /* Round down to the nearest even number. */
3861                 bpc = bpc - (bpc & 1);
3862         }
3863
3864         switch (bpc) {
3865         case 0:
3866                 /*
3867                  * Temporary workaround: DRM doesn't parse color depth for
3868                  * EDID revisions before 1.4.
3869                  * TODO: Fix EDID parsing
3870                  */
3871                 return COLOR_DEPTH_888;
3872         case 6:
3873                 return COLOR_DEPTH_666;
3874         case 8:
3875                 return COLOR_DEPTH_888;
3876         case 10:
3877                 return COLOR_DEPTH_101010;
3878         case 12:
3879                 return COLOR_DEPTH_121212;
3880         case 14:
3881                 return COLOR_DEPTH_141414;
3882         case 16:
3883                 return COLOR_DEPTH_161616;
3884         default:
3885                 return COLOR_DEPTH_UNDEFINED;
3886         }
3887 }
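/*
 * For example, a panel whose EDID reports 10 bpc combined with a
 * connector "max bpc" property of 8 yields min(10, 8) = 8 and therefore
 * COLOR_DEPTH_888; an odd value such as 11 would first be rounded down
 * to 10 by clearing the low bit.
 */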
3888
3889 static enum dc_aspect_ratio
3890 get_aspect_ratio(const struct drm_display_mode *mode_in)
3891 {
3892         /* 1-1 mapping, since both enums follow the HDMI spec. */
3893         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
3894 }
3895
3896 static enum dc_color_space
3897 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
3898 {
3899         enum dc_color_space color_space = COLOR_SPACE_SRGB;
3900
3901         switch (dc_crtc_timing->pixel_encoding) {
3902         case PIXEL_ENCODING_YCBCR422:
3903         case PIXEL_ENCODING_YCBCR444:
3904         case PIXEL_ENCODING_YCBCR420:
3905         {
3906                 /*
3907                  * According to the HDMI spec, 27.03 MHz is the separation
3908                  * point between HDTV and SDTV; we use YCbCr709 above it
3909                  * and YCbCr601 below it.
3910                  */
3911                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
3912                         if (dc_crtc_timing->flags.Y_ONLY)
3913                                 color_space =
3914                                         COLOR_SPACE_YCBCR709_LIMITED;
3915                         else
3916                                 color_space = COLOR_SPACE_YCBCR709;
3917                 } else {
3918                         if (dc_crtc_timing->flags.Y_ONLY)
3919                                 color_space =
3920                                         COLOR_SPACE_YCBCR601_LIMITED;
3921                         else
3922                                 color_space = COLOR_SPACE_YCBCR601;
3923                 }
3924
3925         }
3926         break;
3927         case PIXEL_ENCODING_RGB:
3928                 color_space = COLOR_SPACE_SRGB;
3929                 break;
3930
3931         default:
3932                 WARN_ON(1);
3933                 break;
3934         }
3935
3936         return color_space;
3937 }
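/*
 * For example, 1080p60 YCbCr (pix_clk_100hz == 1485000, i.e. 148.5 MHz)
 * is above the 27.03 MHz threshold and selects YCbCr709, while 480p
 * (270000, i.e. 27.00 MHz) falls below it and selects YCbCr601.
 */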
3938
3939 static bool adjust_colour_depth_from_display_info(
3940         struct dc_crtc_timing *timing_out,
3941         const struct drm_display_info *info)
3942 {
3943         enum dc_color_depth depth = timing_out->display_color_depth;
3944         int normalized_clk;
3945         do {
3946                 normalized_clk = timing_out->pix_clk_100hz / 10;
3947                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
3948                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
3949                         normalized_clk /= 2;
3950                 /* Adjust the pixel clock per the HDMI spec for the colour depth */
3951                 switch (depth) {
3952                 case COLOR_DEPTH_888:
3953                         break;
3954                 case COLOR_DEPTH_101010:
3955                         normalized_clk = (normalized_clk * 30) / 24;
3956                         break;
3957                 case COLOR_DEPTH_121212:
3958                         normalized_clk = (normalized_clk * 36) / 24;
3959                         break;
3960                 case COLOR_DEPTH_161616:
3961                         normalized_clk = (normalized_clk * 48) / 24;
3962                         break;
3963                 default:
3964                         /* The above depths are the only ones valid for HDMI. */
3965                         return false;
3966                 }
3967                 if (normalized_clk <= info->max_tmds_clock) {
3968                         timing_out->display_color_depth = depth;
3969                         return true;
3970                 }
3971         } while (--depth > COLOR_DEPTH_666);
3972         return false;
3973 }
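/*
 * Worked example: 4k60 RGB has pix_clk_100hz == 5940000, so
 * normalized_clk starts at 594000 kHz. At COLOR_DEPTH_101010 it becomes
 * 594000 * 30 / 24 = 742500 kHz; if the sink advertises
 * max_tmds_clock == 600000 kHz, the loop steps the depth down to
 * COLOR_DEPTH_888, which fits, and returns true.
 */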
3974
3975 static void fill_stream_properties_from_drm_display_mode(
3976         struct dc_stream_state *stream,
3977         const struct drm_display_mode *mode_in,
3978         const struct drm_connector *connector,
3979         const struct drm_connector_state *connector_state,
3980         const struct dc_stream_state *old_stream)
3981 {
3982         struct dc_crtc_timing *timing_out = &stream->timing;
3983         const struct drm_display_info *info = &connector->display_info;
3984         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
3985         struct hdmi_vendor_infoframe hv_frame;
3986         struct hdmi_avi_infoframe avi_frame;
3987
3988         memset(&hv_frame, 0, sizeof(hv_frame));
3989         memset(&avi_frame, 0, sizeof(avi_frame));
3990
3991         timing_out->h_border_left = 0;
3992         timing_out->h_border_right = 0;
3993         timing_out->v_border_top = 0;
3994         timing_out->v_border_bottom = 0;
3995         /* TODO: un-hardcode */
3996         if (drm_mode_is_420_only(info, mode_in)
3997                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
3998                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
3999         else if (drm_mode_is_420_also(info, mode_in)
4000                         && aconnector->force_yuv420_output)
4001                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4002         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4003                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4004                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4005         else
4006                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4007
4008         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4009         timing_out->display_color_depth = convert_color_depth_from_display_info(
4010                 connector, connector_state,
4011                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
4012         timing_out->scan_type = SCANNING_TYPE_NODATA;
4013         timing_out->hdmi_vic = 0;
4014
4015         if (old_stream) {
4016                 timing_out->vic = old_stream->timing.vic;
4017                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4018                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4019         } else {
4020                 timing_out->vic = drm_match_cea_mode(mode_in);
4021                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4022                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4023                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4024                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4025         }
4026
4027         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4028                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4029                 timing_out->vic = avi_frame.video_code;
4030                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4031                 timing_out->hdmi_vic = hv_frame.vic;
4032         }
4033
4034         timing_out->h_addressable = mode_in->crtc_hdisplay;
4035         timing_out->h_total = mode_in->crtc_htotal;
4036         timing_out->h_sync_width =
4037                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4038         timing_out->h_front_porch =
4039                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4040         timing_out->v_total = mode_in->crtc_vtotal;
4041         timing_out->v_addressable = mode_in->crtc_vdisplay;
4042         timing_out->v_front_porch =
4043                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4044         timing_out->v_sync_width =
4045                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4046         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4047         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4048
4049         stream->output_color_space = get_output_color_space(timing_out);
4050
4051         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4052         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4053         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4054                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4055                     drm_mode_is_420_also(info, mode_in) &&
4056                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4057                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4058                         adjust_colour_depth_from_display_info(timing_out, info);
4059                 }
4060         }
4061 }
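/*
 * Worked example for the timing conversion: CEA 1920x1080@60 has
 * crtc_hdisplay = 1920, crtc_hsync_start = 2008, crtc_hsync_end = 2052
 * and crtc_htotal = 2200, giving h_front_porch = 2008 - 1920 = 88 and
 * h_sync_width = 2052 - 2008 = 44; with crtc_clock = 148500 kHz,
 * pix_clk_100hz = 148500 * 10 = 1485000.
 */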
4062
4063 static void fill_audio_info(struct audio_info *audio_info,
4064                             const struct drm_connector *drm_connector,
4065                             const struct dc_sink *dc_sink)
4066 {
4067         int i = 0;
4068         int cea_revision = 0;
4069         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4070
4071         audio_info->manufacture_id = edid_caps->manufacturer_id;
4072         audio_info->product_id = edid_caps->product_id;
4073
4074         cea_revision = drm_connector->display_info.cea_rev;
4075
4076         strscpy(audio_info->display_name,
4077                 edid_caps->display_name,
4078                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4079
4080         if (cea_revision >= 3) {
4081                 audio_info->mode_count = edid_caps->audio_mode_count;
4082
4083                 for (i = 0; i < audio_info->mode_count; ++i) {
4084                         audio_info->modes[i].format_code =
4085                                         (enum audio_format_code)
4086                                         (edid_caps->audio_modes[i].format_code);
4087                         audio_info->modes[i].channel_count =
4088                                         edid_caps->audio_modes[i].channel_count;
4089                         audio_info->modes[i].sample_rates.all =
4090                                         edid_caps->audio_modes[i].sample_rate;
4091                         audio_info->modes[i].sample_size =
4092                                         edid_caps->audio_modes[i].sample_size;
4093                 }
4094         }
4095
4096         audio_info->flags.all = edid_caps->speaker_flags;
4097
4098         /* TODO: We only check for progressive mode; check for interlace mode too */
4099         if (drm_connector->latency_present[0]) {
4100                 audio_info->video_latency = drm_connector->video_latency[0];
4101                 audio_info->audio_latency = drm_connector->audio_latency[0];
4102         }
4103
4104         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
4105
4106 }
4107
4108 static void
4109 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4110                                       struct drm_display_mode *dst_mode)
4111 {
4112         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4113         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4114         dst_mode->crtc_clock = src_mode->crtc_clock;
4115         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4116         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4117         dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4118         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4119         dst_mode->crtc_htotal = src_mode->crtc_htotal;
4120         dst_mode->crtc_hskew = src_mode->crtc_hskew;
4121         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4122         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4123         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4124         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4125         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
4126 }
4127
4128 static void
4129 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4130                                         const struct drm_display_mode *native_mode,
4131                                         bool scale_enabled)
4132 {
4133         if (scale_enabled) {
4134                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4135         } else if (native_mode->clock == drm_mode->clock &&
4136                         native_mode->htotal == drm_mode->htotal &&
4137                         native_mode->vtotal == drm_mode->vtotal) {
4138                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4139         } else {
4140                 /* no scaling and no amdgpu-inserted mode; no need to patch */
4141         }
4142 }
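/*
 * For example, when a 1280x720 mode is requested with RMX_ASPECT scaling
 * on a 1920x1080 native panel, the CRTC timing is taken from the native
 * mode and DC scales the 720p viewport to fit; without scaling, the
 * requested timing is left untouched unless it already matches the
 * native clock and totals.
 */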
4143
4144 static struct dc_sink *
4145 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4146 {
4147         struct dc_sink_init_data sink_init_data = { 0 };
4148         struct dc_sink *sink = NULL;
4149         sink_init_data.link = aconnector->dc_link;
4150         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
4151
4152         sink = dc_sink_create(&sink_init_data);
4153         if (!sink) {
4154                 DRM_ERROR("Failed to create sink!\n");
4155                 return NULL;
4156         }
4157         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
4158
4159         return sink;
4160 }
4161
4162 static void set_multisync_trigger_params(
4163                 struct dc_stream_state *stream)
4164 {
4165         if (stream->triggered_crtc_reset.enabled) {
4166                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
4167                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
4168         }
4169 }
4170
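/*
 * Among the streams with the triggered CRTC reset enabled, pick the one
 * with the highest refresh rate (pix_clk / (h_total * v_total)) as the
 * master; every stream then uses the master as its reset event source.
 */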
4171 static void set_master_stream(struct dc_stream_state *stream_set[],
4172                               int stream_count)
4173 {
4174         int j, highest_rfr = 0, master_stream = 0;
4175
4176         for (j = 0;  j < stream_count; j++) {
4177                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
4178                         int refresh_rate = 0;
4179
4180                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
4181                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
4182                         if (refresh_rate > highest_rfr) {
4183                                 highest_rfr = refresh_rate;
4184                                 master_stream = j;
4185                         }
4186                 }
4187         }
4188         for (j = 0;  j < stream_count; j++) {
4189                 if (stream_set[j])
4190                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
4191         }
4192 }
4193
4194 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
4195 {
4196         int i = 0;
4197
4198         if (context->stream_count < 2)
4199                 return;
4200         for (i = 0; i < context->stream_count ; i++) {
4201                 if (!context->streams[i])
4202                         continue;
4203                 /*
4204                  * TODO: add a function to read AMD VSDB bits and set
4205                  * crtc_sync_master.multi_sync_enabled flag
4206                  * For now it's set to false
4207                  */
4208                 set_multisync_trigger_params(context->streams[i]);
4209         }
4210         set_master_stream(context->streams, context->stream_count);
4211 }
4212
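/*
 * Build a dc_stream_state for the given connector and mode. A fake
 * (virtual) sink is used when no dc_sink is attached, and when scaling
 * is enabled with an unchanged refresh rate the VIC and polarities are
 * carried over from @old_stream.
 */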
4213 static struct dc_stream_state *
4214 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
4215                        const struct drm_display_mode *drm_mode,
4216                        const struct dm_connector_state *dm_state,
4217                        const struct dc_stream_state *old_stream)
4218 {
4219         struct drm_display_mode *preferred_mode = NULL;
4220         struct drm_connector *drm_connector;
4221         const struct drm_connector_state *con_state =
4222                 dm_state ? &dm_state->base : NULL;
4223         struct dc_stream_state *stream = NULL;
4224         struct drm_display_mode mode = *drm_mode;
4225         bool native_mode_found = false;
4226         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
4227         int mode_refresh;
4228         int preferred_refresh = 0;
4229 #if defined(CONFIG_DRM_AMD_DC_DCN)
4230         struct dsc_dec_dpcd_caps dsc_caps;
4231 #endif
4232         uint32_t link_bandwidth_kbps;
4233         struct dc_sink *sink = NULL;
4234
4235         if (aconnector == NULL) {
4236                 DRM_ERROR("aconnector is NULL!\n");
4237                 return stream;
4238         }
4239
4240         drm_connector = &aconnector->base;
4241
4242         if (!aconnector->dc_sink) {
4243                 sink = create_fake_sink(aconnector);
4244                 if (!sink)
4245                         return stream;
4246         } else {
4247                 sink = aconnector->dc_sink;
4248                 dc_sink_retain(sink);
4249         }
4250
4251         stream = dc_create_stream_for_sink(sink);
4252
4253         if (stream == NULL) {
4254                 DRM_ERROR("Failed to create stream for sink!\n");
4255                 goto finish;
4256         }
4257
4258         stream->dm_stream_context = aconnector;
4259
4260         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
4261                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
4262
4263         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
4264                 /* Search for preferred mode */
4265                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
4266                         native_mode_found = true;
4267                         break;
4268                 }
4269         }
4270         if (!native_mode_found)
4271                 preferred_mode = list_first_entry_or_null(
4272                                 &aconnector->base.modes,
4273                                 struct drm_display_mode,
4274                                 head);
4275
4276         mode_refresh = drm_mode_vrefresh(&mode);
4277
4278         if (preferred_mode == NULL) {
4279                 /*
4280                  * This may not be an error; the use case is when we have no
4281                  * usermode call to reset and set the mode upon hotplug. In
4282                  * that case we set the mode ourselves to restore the previous
4283                  * mode, and the mode list may not have been filled in yet.
4284                  */
4285                 DRM_DEBUG_DRIVER("No preferred mode found\n");
4286         } else {
4287                 decide_crtc_timing_for_drm_display_mode(
4288                                 &mode, preferred_mode,
4289                                 dm_state ? (dm_state->scaling != RMX_OFF) : false);
4290                 preferred_refresh = drm_mode_vrefresh(preferred_mode);
4291         }
4292
4293         if (!dm_state)
4294                 drm_mode_set_crtcinfo(&mode, 0);
4295
4296         /*
4297          * If scaling is enabled and the refresh rate didn't change,
4298          * copy the VIC and polarities from the old timings.
4299          */
4300         if (!scale || mode_refresh != preferred_refresh)
4301                 fill_stream_properties_from_drm_display_mode(stream,
4302                         &mode, &aconnector->base, con_state, NULL);
4303         else
4304                 fill_stream_properties_from_drm_display_mode(stream,
4305                         &mode, &aconnector->base, con_state, old_stream);
4306
4307         stream->timing.flags.DSC = 0;
4308
4309         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4310 #if defined(CONFIG_DRM_AMD_DC_DCN)
4311                 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
4312                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
4313                                       aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
4314                                       &dsc_caps);
4315 #endif
4316                 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
4317                                                              dc_link_get_link_cap(aconnector->dc_link));
4318
4319 #if defined(CONFIG_DRM_AMD_DC_DCN)
4320                 if (dsc_caps.is_dsc_supported)
4321                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
4322                                                   &dsc_caps,
4323                                                   aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
4324                                                   link_bandwidth_kbps,
4325                                                   &stream->timing,
4326                                                   &stream->timing.dsc_cfg))
4327                                 stream->timing.flags.DSC = 1;
4328 #endif
4329         }
4330
4331         update_stream_scaling_settings(&mode, dm_state, stream);
4332
4333         fill_audio_info(
4334                 &stream->audio_info,
4335                 drm_connector,
4336                 sink);
4337
4338         update_stream_signal(stream, sink);
4339
4340         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4341                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
4342         if (stream->link->psr_settings.psr_feature_enabled) {
4343                 struct dc *core_dc = stream->link->ctx->dc;
4344
4345                 if (dc_is_dmcu_initialized(core_dc)) {
4346                         /*
4347                          * Decide whether the stream supports VSC SDP
4348                          * colorimetry before building the VSC info packet.
4349                          */
4350                         stream->use_vsc_sdp_for_colorimetry = false;
4351                         if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
4352                                 stream->use_vsc_sdp_for_colorimetry =
4353                                         aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
4354                         } else {
4355                                 if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
4356                                         stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
4357                                         stream->use_vsc_sdp_for_colorimetry = true;
4358                                 }
4359                         }
4360                         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
4361                 }
4362         }
4363 finish:
4364         dc_sink_release(sink);
4365
4366         return stream;
4367 }
4368
4369 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
4370 {
4371         drm_crtc_cleanup(crtc);
4372         kfree(crtc);
4373 }
4374
4375 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
4376                                   struct drm_crtc_state *state)
4377 {
4378         struct dm_crtc_state *cur = to_dm_crtc_state(state);
4379
4380         /* TODO: Destroy dc_stream objects when the stream object is flattened */
4381         if (cur->stream)
4382                 dc_stream_release(cur->stream);
4383
4384
4385         __drm_atomic_helper_crtc_destroy_state(state);
4386
4387
4388         kfree(state);
4389 }
4390
4391 static void dm_crtc_reset_state(struct drm_crtc *crtc)
4392 {
4393         struct dm_crtc_state *state;
4394
4395         if (crtc->state)
4396                 dm_crtc_destroy_state(crtc, crtc->state);
4397
4398         state = kzalloc(sizeof(*state), GFP_KERNEL);
4399         if (WARN_ON(!state))
4400                 return;
4401
4402         crtc->state = &state->base;
4403         crtc->state->crtc = crtc;
4404
4405 }
4406
4407 static struct drm_crtc_state *
4408 dm_crtc_duplicate_state(struct drm_crtc *crtc)
4409 {
4410         struct dm_crtc_state *state, *cur;
4411
4412         if (WARN_ON(!crtc->state))
4413                 return NULL;
4414
4415         cur = to_dm_crtc_state(crtc->state);
4416
4417         state = kzalloc(sizeof(*state), GFP_KERNEL);
4418         if (!state)
4419                 return NULL;
4420
4421         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
4422
4423         if (cur->stream) {
4424                 state->stream = cur->stream;
4425                 dc_stream_retain(state->stream);
4426         }
4427
4428         state->active_planes = cur->active_planes;
4429         state->interrupts_enabled = cur->interrupts_enabled;
4430         state->vrr_params = cur->vrr_params;
4431         state->vrr_infopacket = cur->vrr_infopacket;
4432         state->abm_level = cur->abm_level;
4433         state->vrr_supported = cur->vrr_supported;
4434         state->freesync_config = cur->freesync_config;
4435         state->crc_src = cur->crc_src;
4436         state->cm_has_degamma = cur->cm_has_degamma;
4437         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
4438
4439         /* TODO: Duplicate the dc_stream after the stream object is flattened */
4440
4441         return &state->base;
4442 }
4443
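/*
 * Enable or disable the VUPDATE interrupt for the CRTC's OTG instance.
 * VUPDATE is only needed while VRR is active; see dm_set_vblank().
 */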
4444 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4445 {
4446         enum dc_irq_source irq_source;
4447         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4448         struct amdgpu_device *adev = crtc->dev->dev_private;
4449         int rc;
4450
4451         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4452
4453         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4454
4455         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4456                          acrtc->crtc_id, enable ? "en" : "dis", rc);
4457         return rc;
4458 }
4459
4460 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
4461 {
4462         enum dc_irq_source irq_source;
4463         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4464         struct amdgpu_device *adev = crtc->dev->dev_private;
4465         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
4466         int rc = 0;
4467
4468         if (enable) {
4469                 /* vblank irq on -> Only need vupdate irq in vrr mode */
4470                 if (amdgpu_dm_vrr_active(acrtc_state))
4471                         rc = dm_set_vupdate_irq(crtc, true);
4472         } else {
4473                 /* vblank irq off -> vupdate irq off */
4474                 rc = dm_set_vupdate_irq(crtc, false);
4475         }
4476
4477         if (rc)
4478                 return rc;
4479
4480         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
4481         return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4482 }
4483
4484 static int dm_enable_vblank(struct drm_crtc *crtc)
4485 {
4486         return dm_set_vblank(crtc, true);
4487 }
4488
4489 static void dm_disable_vblank(struct drm_crtc *crtc)
4490 {
4491         dm_set_vblank(crtc, false);
4492 }
4493
4494 /* Only the options currently available for the driver are implemented */
4495 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
4496         .reset = dm_crtc_reset_state,
4497         .destroy = amdgpu_dm_crtc_destroy,
4498         .gamma_set = drm_atomic_helper_legacy_gamma_set,
4499         .set_config = drm_atomic_helper_set_config,
4500         .page_flip = drm_atomic_helper_page_flip,
4501         .atomic_duplicate_state = dm_crtc_duplicate_state,
4502         .atomic_destroy_state = dm_crtc_destroy_state,
4503         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
4504         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
4505         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
4506         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
4507         .enable_vblank = dm_enable_vblank,
4508         .disable_vblank = dm_disable_vblank,
4509         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
4510 };
4511
4512 static enum drm_connector_status
4513 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
4514 {
4515         bool connected;
4516         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4517
4518         /*
4519          * Notes:
4520          * 1. This interface is NOT called in context of HPD irq.
4521          * 2. This interface *is called* in context of user-mode ioctl. Which
4522          * makes it a bad place for *any* MST-related activity.
4523          */
4524
4525         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
4526             !aconnector->fake_enable)
4527                 connected = (aconnector->dc_sink != NULL);
4528         else
4529                 connected = (aconnector->base.force == DRM_FORCE_ON);
4530
4531         return (connected ? connector_status_connected :
4532                         connector_status_disconnected);
4533 }
4534
4535 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
4536                                             struct drm_connector_state *connector_state,
4537                                             struct drm_property *property,
4538                                             uint64_t val)
4539 {
4540         struct drm_device *dev = connector->dev;
4541         struct amdgpu_device *adev = dev->dev_private;
4542         struct dm_connector_state *dm_old_state =
4543                 to_dm_connector_state(connector->state);
4544         struct dm_connector_state *dm_new_state =
4545                 to_dm_connector_state(connector_state);
4546
4547         int ret = -EINVAL;
4548
4549         if (property == dev->mode_config.scaling_mode_property) {
4550                 enum amdgpu_rmx_type rmx_type;
4551
4552                 switch (val) {
4553                 case DRM_MODE_SCALE_CENTER:
4554                         rmx_type = RMX_CENTER;
4555                         break;
4556                 case DRM_MODE_SCALE_ASPECT:
4557                         rmx_type = RMX_ASPECT;
4558                         break;
4559                 case DRM_MODE_SCALE_FULLSCREEN:
4560                         rmx_type = RMX_FULL;
4561                         break;
4562                 case DRM_MODE_SCALE_NONE:
4563                 default:
4564                         rmx_type = RMX_OFF;
4565                         break;
4566                 }
4567
4568                 if (dm_old_state->scaling == rmx_type)
4569                         return 0;
4570
4571                 dm_new_state->scaling = rmx_type;
4572                 ret = 0;
4573         } else if (property == adev->mode_info.underscan_hborder_property) {
4574                 dm_new_state->underscan_hborder = val;
4575                 ret = 0;
4576         } else if (property == adev->mode_info.underscan_vborder_property) {
4577                 dm_new_state->underscan_vborder = val;
4578                 ret = 0;
4579         } else if (property == adev->mode_info.underscan_property) {
4580                 dm_new_state->underscan_enable = val;
4581                 ret = 0;
4582         } else if (property == adev->mode_info.abm_level_property) {
4583                 dm_new_state->abm_level = val;
4584                 ret = 0;
4585         }
4586
4587         return ret;
4588 }
4589
4590 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
4591                                             const struct drm_connector_state *state,
4592                                             struct drm_property *property,
4593                                             uint64_t *val)
4594 {
4595         struct drm_device *dev = connector->dev;
4596         struct amdgpu_device *adev = dev->dev_private;
4597         struct dm_connector_state *dm_state =
4598                 to_dm_connector_state(state);
4599         int ret = -EINVAL;
4600
4601         if (property == dev->mode_config.scaling_mode_property) {
4602                 switch (dm_state->scaling) {
4603                 case RMX_CENTER:
4604                         *val = DRM_MODE_SCALE_CENTER;
4605                         break;
4606                 case RMX_ASPECT:
4607                         *val = DRM_MODE_SCALE_ASPECT;
4608                         break;
4609                 case RMX_FULL:
4610                         *val = DRM_MODE_SCALE_FULLSCREEN;
4611                         break;
4612                 case RMX_OFF:
4613                 default:
4614                         *val = DRM_MODE_SCALE_NONE;
4615                         break;
4616                 }
4617                 ret = 0;
4618         } else if (property == adev->mode_info.underscan_hborder_property) {
4619                 *val = dm_state->underscan_hborder;
4620                 ret = 0;
4621         } else if (property == adev->mode_info.underscan_vborder_property) {
4622                 *val = dm_state->underscan_vborder;
4623                 ret = 0;
4624         } else if (property == adev->mode_info.underscan_property) {
4625                 *val = dm_state->underscan_enable;
4626                 ret = 0;
4627         } else if (property == adev->mode_info.abm_level_property) {
4628                 *val = dm_state->abm_level;
4629                 ret = 0;
4630         }
4631
4632         return ret;
4633 }
4634
4635 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
4636 {
4637         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
4638
4639         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
4640 }
4641
4642 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
4643 {
4644         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4645         const struct dc_link *link = aconnector->dc_link;
4646         struct amdgpu_device *adev = connector->dev->dev_private;
4647         struct amdgpu_display_manager *dm = &adev->dm;
4648
4649 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4650         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4651
4652         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4653             link->type != dc_connection_none &&
4654             dm->backlight_dev) {
4655                 backlight_device_unregister(dm->backlight_dev);
4656                 dm->backlight_dev = NULL;
4657         }
4658 #endif
4659
4660         if (aconnector->dc_em_sink)
4661                 dc_sink_release(aconnector->dc_em_sink);
4662         aconnector->dc_em_sink = NULL;
4663         if (aconnector->dc_sink)
4664                 dc_sink_release(aconnector->dc_sink);
4665         aconnector->dc_sink = NULL;
4666
4667         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
4668         drm_connector_unregister(connector);
4669         drm_connector_cleanup(connector);
4670         if (aconnector->i2c) {
4671                 i2c_del_adapter(&aconnector->i2c->base);
4672                 kfree(aconnector->i2c);
4673         }
4674         kfree(aconnector->dm_dp_aux.aux.name);
4675
4676         kfree(connector);
4677 }
4678
4679 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
4680 {
4681         struct dm_connector_state *state =
4682                 to_dm_connector_state(connector->state);
4683
4684         if (connector->state)
4685                 __drm_atomic_helper_connector_destroy_state(connector->state);
4686
4687         kfree(state);
4688
4689         state = kzalloc(sizeof(*state), GFP_KERNEL);
4690
4691         if (state) {
4692                 state->scaling = RMX_OFF;
4693                 state->underscan_enable = false;
4694                 state->underscan_hborder = 0;
4695                 state->underscan_vborder = 0;
4696                 state->base.max_requested_bpc = 8;
4697                 state->vcpi_slots = 0;
4698                 state->pbn = 0;
4699                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4700                         state->abm_level = amdgpu_dm_abm_level;
4701
4702                 __drm_atomic_helper_connector_reset(connector, &state->base);
4703         }
4704 }
4705
4706 struct drm_connector_state *
4707 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
4708 {
4709         struct dm_connector_state *state =
4710                 to_dm_connector_state(connector->state);
4711
4712         struct dm_connector_state *new_state =
4713                 kmemdup(state, sizeof(*state), GFP_KERNEL);
4714
4715         if (!new_state)
4716                 return NULL;
4717
4718         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
4719
4720         new_state->freesync_capable = state->freesync_capable;
4721         new_state->abm_level = state->abm_level;
4722         new_state->scaling = state->scaling;
4723         new_state->underscan_enable = state->underscan_enable;
4724         new_state->underscan_hborder = state->underscan_hborder;
4725         new_state->underscan_vborder = state->underscan_vborder;
4726         new_state->vcpi_slots = state->vcpi_slots;
4727         new_state->pbn = state->pbn;
4728         return &new_state->base;
4729 }
4730
4731 static int
4732 amdgpu_dm_connector_late_register(struct drm_connector *connector)
4733 {
4734 #if defined(CONFIG_DEBUG_FS)
4735         struct amdgpu_dm_connector *amdgpu_dm_connector =
4736                 to_amdgpu_dm_connector(connector);
4737         int r;
4738
4739         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
4740             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
4741                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
4742                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
4743                 if (r)
4744                         return r;
4745         }
4746
4747         connector_debugfs_init(amdgpu_dm_connector);
4748 #endif
4749
4750         return 0;
4751 }
4752
4753 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
4754         .reset = amdgpu_dm_connector_funcs_reset,
4755         .detect = amdgpu_dm_connector_detect,
4756         .fill_modes = drm_helper_probe_single_connector_modes,
4757         .destroy = amdgpu_dm_connector_destroy,
4758         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
4759         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4760         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
4761         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
4762         .late_register = amdgpu_dm_connector_late_register,
4763         .early_unregister = amdgpu_dm_connector_unregister
4764 };
4765
4766 static int get_modes(struct drm_connector *connector)
4767 {
4768         return amdgpu_dm_connector_get_modes(connector);
4769 }
4770
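/*
 * Create an emulated (remote) sink from the connector's EDID property
 * blob so a forced-on connector behaves as if the panel described by
 * that EDID were attached.
 */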
4771 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
4772 {
4773         struct dc_sink_init_data init_params = {
4774                         .link = aconnector->dc_link,
4775                         .sink_signal = SIGNAL_TYPE_VIRTUAL
4776         };
4777         struct edid *edid;
4778
4779         if (!aconnector->base.edid_blob_ptr) {
4780                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
4781                                 aconnector->base.name);
4782
4783                 aconnector->base.force = DRM_FORCE_OFF;
4784                 aconnector->base.override_edid = false;
4785                 return;
4786         }
4787
4788         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
4789
4790         aconnector->edid = edid;
4791
4792         aconnector->dc_em_sink = dc_link_add_remote_sink(
4793                 aconnector->dc_link,
4794                 (uint8_t *)edid,
4795                 (edid->extensions + 1) * EDID_LENGTH,
4796                 &init_params);
4797
4798         if (aconnector->base.force == DRM_FORCE_ON) {
4799                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
4800                 aconnector->dc_link->local_sink :
4801                 aconnector->dc_em_sink;
4802                 dc_sink_retain(aconnector->dc_sink);
4803         }
4804 }
4805
4806 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
4807 {
4808         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
4809
4810         /*
4811          * In the case of a headless boot with force-on for a DP managed
4812          * connector, these settings must be != 0 to get an initial modeset.
4813          */
4814         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
4815                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
4816                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
4817         }
4818
4819
4820         aconnector->base.override_edid = true;
4821         create_eml_sink(aconnector);
4822 }
4823
4824 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
4825                                    struct drm_display_mode *mode)
4826 {
4827         int result = MODE_ERROR;
4828         struct dc_sink *dc_sink;
4829         struct amdgpu_device *adev = connector->dev->dev_private;
4830         /* TODO: Unhardcode stream count */
4831         struct dc_stream_state *stream;
4832         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4833         enum dc_status dc_result = DC_OK;
4834
4835         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
4836                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
4837                 return result;
4838
4839         /*
4840          * Only run this the first time mode_valid is called, to initialize
4841          * EDID management
4842          */
4843         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
4844                 !aconnector->dc_em_sink)
4845                 handle_edid_mgmt(aconnector);
4846
4847         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
4848
4849         if (dc_sink == NULL) {
4850                 DRM_ERROR("dc_sink is NULL!\n");
4851                 goto fail;
4852         }
4853
4854         stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
4855         if (stream == NULL) {
4856                 DRM_ERROR("Failed to create stream for sink!\n");
4857                 goto fail;
4858         }
4859
4860         dc_result = dc_validate_stream(adev->dm.dc, stream);
4861
4862         if (dc_result == DC_OK)
4863                 result = MODE_OK;
4864         else
4865                 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
4866                               mode->hdisplay,
4867                               mode->vdisplay,
4868                               mode->clock,
4869                               dc_result);
4870
4871         dc_stream_release(stream);
4872
4873 fail:
4874         /* TODO: error handling */
4875         return result;
4876 }
4877
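/*
 * Pack the connector state's HDR static metadata into a DC info packet.
 * The payload is the same 26-byte metadata block for all signal types;
 * only the header differs between the HDMI infoframe and the DP/eDP SDP.
 */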
4878 static int fill_hdr_info_packet(const struct drm_connector_state *state,
4879                                 struct dc_info_packet *out)
4880 {
4881         struct hdmi_drm_infoframe frame;
4882         unsigned char buf[30]; /* 26 + 4 */
4883         ssize_t len;
4884         int ret, i;
4885
4886         memset(out, 0, sizeof(*out));
4887
4888         if (!state->hdr_output_metadata)
4889                 return 0;
4890
4891         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
4892         if (ret)
4893                 return ret;
4894
4895         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
4896         if (len < 0)
4897                 return (int)len;
4898
4899         /* Static metadata is a fixed 26 bytes + 4 byte header. */
4900         if (len != 30)
4901                 return -EINVAL;
4902
4903         /* Prepare the infopacket for DC. */
4904         switch (state->connector->connector_type) {
4905         case DRM_MODE_CONNECTOR_HDMIA:
4906                 out->hb0 = 0x87; /* type */
4907                 out->hb1 = 0x01; /* version */
4908                 out->hb2 = 0x1A; /* length */
4909                 out->sb[0] = buf[3]; /* checksum */
4910                 i = 1;
4911                 break;
4912
4913         case DRM_MODE_CONNECTOR_DisplayPort:
4914         case DRM_MODE_CONNECTOR_eDP:
4915                 out->hb0 = 0x00; /* sdp id, zero */
4916                 out->hb1 = 0x87; /* type */
4917                 out->hb2 = 0x1D; /* payload len - 1 */
4918                 out->hb3 = (0x13 << 2); /* sdp version */
4919                 out->sb[0] = 0x01; /* version */
4920                 out->sb[1] = 0x1A; /* length */
4921                 i = 2;
4922                 break;
4923
4924         default:
4925                 return -EINVAL;
4926         }
4927
4928         memcpy(&out->sb[i], &buf[4], 26);
4929         out->valid = true;
4930
4931         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
4932                        sizeof(out->sb), false);
4933
4934         return 0;
4935 }
4936
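/*
 * Return true if the HDR output metadata changed between the two
 * connector states, i.e. the blobs differ in identity, length or content.
 */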
4937 static bool
4938 is_hdr_metadata_different(const struct drm_connector_state *old_state,
4939                           const struct drm_connector_state *new_state)
4940 {
4941         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
4942         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
4943
4944         if (old_blob != new_blob) {
4945                 if (old_blob && new_blob &&
4946                     old_blob->length == new_blob->length)
4947                         return memcmp(old_blob->data, new_blob->data,
4948                                       old_blob->length);
4949
4950                 return true;
4951         }
4952
4953         return false;
4954 }
4955
4956 static int
4957 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
4958                                  struct drm_atomic_state *state)
4959 {
4960         struct drm_connector_state *new_con_state =
4961                 drm_atomic_get_new_connector_state(state, conn);
4962         struct drm_connector_state *old_con_state =
4963                 drm_atomic_get_old_connector_state(state, conn);
4964         struct drm_crtc *crtc = new_con_state->crtc;
4965         struct drm_crtc_state *new_crtc_state;
4966         int ret;
4967
4968         if (!crtc)
4969                 return 0;
4970
4971         if (is_hdr_metadata_different(old_con_state, new_con_state)) {
4972                 struct dc_info_packet hdr_infopacket;
4973
4974                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
4975                 if (ret)
4976                         return ret;
4977
4978                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
4979                 if (IS_ERR(new_crtc_state))
4980                         return PTR_ERR(new_crtc_state);
4981
4982                 /*
4983                  * DC considers the stream backends changed if the
4984                  * static metadata changes. Forcing the modeset also
4985                  * gives a simple way for userspace to switch from
4986                  * 8bpc to 10bpc when setting the metadata to enter
4987                  * or exit HDR.
4988                  *
4989                  * Changing the static metadata after it's been
4990                  * set is permissible, however. So only force a
4991                  * modeset if we're entering or exiting HDR.
4992                  */
4993                 new_crtc_state->mode_changed =
4994                         !old_con_state->hdr_output_metadata ||
4995                         !new_con_state->hdr_output_metadata;
4996         }
4997
4998         return 0;
4999 }
5000
5001 static const struct drm_connector_helper_funcs
5002 amdgpu_dm_connector_helper_funcs = {
5003         /*
5004          * When hotplugging a second, larger display in fbcon mode, the
5005          * higher-resolution modes are filtered out by drm_mode_validate_size()
5006          * and go missing after the user starts lightdm. So we need to renew
5007          * the mode list in the get_modes callback, not just return its count.
5008          */
5009         .get_modes = get_modes,
5010         .mode_valid = amdgpu_dm_connector_mode_valid,
5011         .atomic_check = amdgpu_dm_connector_atomic_check,
5012 };
5013
5014 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5015 {
5016 }
5017
5018 static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
5019 {
5020         struct drm_device *dev = new_crtc_state->crtc->dev;
5021         struct drm_plane *plane;
5022
5023         drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
5024                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5025                         return true;
5026         }
5027
5028         return false;
5029 }
5030
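/*
 * Count the non-cursor planes that will be enabled on the CRTC. Planes
 * absent from the atomic state previously passed validation and count as
 * enabled; planes present in it count only if they have a framebuffer.
 */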
5031 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5032 {
5033         struct drm_atomic_state *state = new_crtc_state->state;
5034         struct drm_plane *plane;
5035         int num_active = 0;
5036
5037         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5038                 struct drm_plane_state *new_plane_state;
5039
5040                 /* Cursor planes are "fake". */
5041                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5042                         continue;
5043
5044                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
5045
5046                 if (!new_plane_state) {
5047                         /*
5048                          * The plane is enabled on the CRTC and hasn't changed
5049                          * state. This means that it previously passed
5050                          * validation and is therefore enabled.
5051                          */
5052                         num_active += 1;
5053                         continue;
5054                 }
5055
5056                 /* We need a framebuffer to be considered enabled. */
5057                 num_active += (new_plane_state->fb != NULL);
5058         }
5059
5060         return num_active;
5061 }
5062
5063 /*
5064  * Sets whether interrupts should be enabled on a specific CRTC.
5065  * We require that the stream be enabled and that there exist active
5066  * DC planes on the stream.
5067  */
5068 static void
5069 dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
5070                                struct drm_crtc_state *new_crtc_state)
5071 {
5072         struct dm_crtc_state *dm_new_crtc_state =
5073                 to_dm_crtc_state(new_crtc_state);
5074
5075         dm_new_crtc_state->active_planes = 0;
5076         dm_new_crtc_state->interrupts_enabled = false;
5077
5078         if (!dm_new_crtc_state->stream)
5079                 return;
5080
5081         dm_new_crtc_state->active_planes =
5082                 count_crtc_active_planes(new_crtc_state);
5083
5084         dm_new_crtc_state->interrupts_enabled =
5085                 dm_new_crtc_state->active_planes > 0;
5086 }
5087
5088 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5089                                        struct drm_crtc_state *state)
5090 {
5091         struct amdgpu_device *adev = crtc->dev->dev_private;
5092         struct dc *dc = adev->dm.dc;
5093         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
5094         int ret = -EINVAL;
5095
5096         /*
5097          * Update interrupt state for the CRTC. This needs to happen whenever
5098          * the CRTC has changed or whenever any of its planes have changed.
5099          * Atomic check satisfies both of these requirements since the CRTC
5100          * is added to the state by DRM during drm_atomic_helper_check_planes.
5101          */
5102         dm_update_crtc_interrupt_state(crtc, state);
5103
5104         if (unlikely(!dm_crtc_state->stream &&
5105                      modeset_required(state, NULL, dm_crtc_state->stream))) {
5106                 WARN_ON(1);
5107                 return ret;
5108         }
5109
5110         /* In some use cases, like reset, no stream is attached */
5111         if (!dm_crtc_state->stream)
5112                 return 0;
5113
5114         /*
5115          * We want at least one hardware plane enabled to use
5116          * the stream with a cursor enabled.
5117          */
5118         if (state->enable && state->active &&
5119             does_crtc_have_active_cursor(state) &&
5120             dm_crtc_state->active_planes == 0)
5121                 return -EINVAL;
5122
5123         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
5124                 return 0;
5125
5126         return ret;
5127 }
5128
5129 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
5130                                       const struct drm_display_mode *mode,
5131                                       struct drm_display_mode *adjusted_mode)
5132 {
5133         return true;
5134 }
5135
5136 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
5137         .disable = dm_crtc_helper_disable,
5138         .atomic_check = dm_crtc_helper_atomic_check,
5139         .mode_fixup = dm_crtc_helper_mode_fixup,
5140         .get_scanout_position = amdgpu_crtc_get_scanout_position,
5141 };
5142
5143 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
5144 {
5145
5146 }
5147
5148 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
5149 {
5150         switch (display_color_depth) {
5151         case COLOR_DEPTH_666:
5152                 return 6;
5153         case COLOR_DEPTH_888:
5154                 return 8;
5155         case COLOR_DEPTH_101010:
5156                 return 10;
5157         case COLOR_DEPTH_121212:
5158                 return 12;
5159         case COLOR_DEPTH_141414:
5160                 return 14;
5161         case COLOR_DEPTH_161616:
5162                 return 16;
5163         default:
5164                 break;
5165         }
5166         return 0;
5167 }
5168
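/*
 * For an MST connector, compute the PBN for the adjusted mode from its
 * clock and bpp, then atomically find the VCPI slots for that PBN on the
 * topology manager.
 */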
5169 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
5170                                           struct drm_crtc_state *crtc_state,
5171                                           struct drm_connector_state *conn_state)
5172 {
5173         struct drm_atomic_state *state = crtc_state->state;
5174         struct drm_connector *connector = conn_state->connector;
5175         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5176         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
5177         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
5178         struct drm_dp_mst_topology_mgr *mst_mgr;
5179         struct drm_dp_mst_port *mst_port;
5180         enum dc_color_depth color_depth;
5181         int clock, bpp = 0;
5182         bool is_y420 = false;
5183
5184         if (!aconnector->port || !aconnector->dc_sink)
5185                 return 0;
5186
5187         mst_port = aconnector->port;
5188         mst_mgr = &aconnector->mst_port->mst_mgr;
5189
5190         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
5191                 return 0;
5192
5193         if (!state->duplicated) {
5194                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
5195                                 aconnector->force_yuv420_output;
5196                 color_depth = convert_color_depth_from_display_info(connector, conn_state,
5197                                                                     is_y420);
5198                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
5199                 clock = adjusted_mode->clock;
5200                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
5201         }
5202         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
5203                                                                            mst_mgr,
5204                                                                            mst_port,
5205                                                                            dm_new_connector_state->pbn,
5206                                                                            0);
5207         if (dm_new_connector_state->vcpi_slots < 0) {
5208                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
5209                 return dm_new_connector_state->vcpi_slots;
5210         }
5211         return 0;
5212 }
5213
5214 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
5215         .disable = dm_encoder_helper_disable,
5216         .atomic_check = dm_encoder_helper_atomic_check
5217 };
5218
5219 #if defined(CONFIG_DRM_AMD_DC_DCN)
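/*
 * For each MST connector whose stream has DSC enabled, recompute the PBN
 * from the DSC target bitrate and reallocate VCPI slots accordingly;
 * streams without DSC keep their previously computed PBN.
 */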
5220 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
5221                                             struct dc_state *dc_state)
5222 {
5223         struct dc_stream_state *stream = NULL;
5224         struct drm_connector *connector;
5225         struct drm_connector_state *new_con_state, *old_con_state;
5226         struct amdgpu_dm_connector *aconnector;
5227         struct dm_connector_state *dm_conn_state;
5228         int i, j, clock, bpp;
5229         int vcpi, pbn_div, pbn = 0;
5230
5231         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
5232
5233                 aconnector = to_amdgpu_dm_connector(connector);
5234
5235                 if (!aconnector->port)
5236                         continue;
5237
5238                 if (!new_con_state || !new_con_state->crtc)
5239                         continue;
5240
5241                 dm_conn_state = to_dm_connector_state(new_con_state);
5242
5243                 for (j = 0; j < dc_state->stream_count; j++) {
5244                         stream = dc_state->streams[j];
5245                         if (!stream)
5246                                 continue;
5247
5248                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
5249                                 break;
5250
5251                         stream = NULL;
5252                 }
5253
5254                 if (!stream)
5255                         continue;
5256
5257                 if (stream->timing.flags.DSC != 1) {
5258                         drm_dp_mst_atomic_enable_dsc(state,
5259                                                      aconnector->port,
5260                                                      dm_conn_state->pbn,
5261                                                      0,
5262                                                      false);
5263                         continue;
5264                 }
5265
5266                 pbn_div = dm_mst_get_pbn_divider(stream->link);
5267                 bpp = stream->timing.dsc_cfg.bits_per_pixel;
5268                 clock = stream->timing.pix_clk_100hz / 10;
5269                 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
5270                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
5271                                                     aconnector->port,
5272                                                     pbn, pbn_div,
5273                                                     true);
5274                 if (vcpi < 0)
5275                         return vcpi;
5276
5277                 dm_conn_state->pbn = pbn;
5278                 dm_conn_state->vcpi_slots = vcpi;
5279         }
5280         return 0;
5281 }
5282 #endif
5283
5284 static void dm_drm_plane_reset(struct drm_plane *plane)
5285 {
5286         struct dm_plane_state *amdgpu_state = NULL;
5287
5288         if (plane->state)
5289                 plane->funcs->atomic_destroy_state(plane, plane->state);
5290
5291         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
5292         WARN_ON(amdgpu_state == NULL);
5293
5294         if (amdgpu_state)
5295                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
5296 }
5297
5298 static struct drm_plane_state *
5299 dm_drm_plane_duplicate_state(struct drm_plane *plane)
5300 {
5301         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
5302
5303         old_dm_plane_state = to_dm_plane_state(plane->state);
5304         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
5305         if (!dm_plane_state)
5306                 return NULL;
5307
5308         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
5309
5310         if (old_dm_plane_state->dc_state) {
5311                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
5312                 dc_plane_state_retain(dm_plane_state->dc_state);
5313         }
5314
5315         return &dm_plane_state->base;
5316 }
5317
5318 void dm_drm_plane_destroy_state(struct drm_plane *plane,
5319                                 struct drm_plane_state *state)
5320 {
5321         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
5322
5323         if (dm_plane_state->dc_state)
5324                 dc_plane_state_release(dm_plane_state->dc_state);
5325
5326         drm_atomic_helper_plane_destroy_state(plane, state);
5327 }
5328
5329 static const struct drm_plane_funcs dm_plane_funcs = {
5330         .update_plane   = drm_atomic_helper_update_plane,
5331         .disable_plane  = drm_atomic_helper_disable_plane,
5332         .destroy        = drm_primary_helper_destroy,
5333         .reset = dm_drm_plane_reset,
5334         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
5335         .atomic_destroy_state = dm_drm_plane_destroy_state,
5336 };
5337
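/*
 * Reserve and pin the framebuffer BO (VRAM for cursor planes, otherwise
 * any supported domain), make sure it has a valid GART mapping for its
 * GPU address, and fill the DC plane buffer attributes from the BO's
 * tiling flags when the plane's dc_state changed.
 */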
5338 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
5339                                       struct drm_plane_state *new_state)
5340 {
5341         struct amdgpu_framebuffer *afb;
5342         struct drm_gem_object *obj;
5343         struct amdgpu_device *adev;
5344         struct amdgpu_bo *rbo;
5345         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
5346         struct list_head list;
5347         struct ttm_validate_buffer tv;
5348         struct ww_acquire_ctx ticket;
5349         uint64_t tiling_flags;
5350         uint32_t domain;
5351         int r;
5352         bool tmz_surface = false;
5353         bool force_disable_dcc = false;
5354
5355         dm_plane_state_old = to_dm_plane_state(plane->state);
5356         dm_plane_state_new = to_dm_plane_state(new_state);
5357
5358         if (!new_state->fb) {
5359                 DRM_DEBUG_DRIVER("No FB bound\n");
5360                 return 0;
5361         }
5362
5363         afb = to_amdgpu_framebuffer(new_state->fb);
5364         obj = new_state->fb->obj[0];
5365         rbo = gem_to_amdgpu_bo(obj);
5366         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
5367         INIT_LIST_HEAD(&list);
5368
5369         tv.bo = &rbo->tbo;
5370         tv.num_shared = 1;
5371         list_add(&tv.head, &list);
5372
5373         r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
5374         if (r) {
5375                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
5376                 return r;
5377         }
5378
5379         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5380                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
5381         else
5382                 domain = AMDGPU_GEM_DOMAIN_VRAM;
5383
5384         r = amdgpu_bo_pin(rbo, domain);
5385         if (unlikely(r != 0)) {
5386                 if (r != -ERESTARTSYS)
5387                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
5388                 ttm_eu_backoff_reservation(&ticket, &list);
5389                 return r;
5390         }
5391
5392         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
5393         if (unlikely(r != 0)) {
5394                 amdgpu_bo_unpin(rbo);
5395                 ttm_eu_backoff_reservation(&ticket, &list);
5396                 DRM_ERROR("%p bind failed\n", rbo);
5397                 return r;
5398         }
5399
5400         amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
5401
5402         tmz_surface = amdgpu_bo_encrypted(rbo);
5403
5404         ttm_eu_backoff_reservation(&ticket, &list);
5405
5406         afb->address = amdgpu_bo_gpu_offset(rbo);
5407
5408         amdgpu_bo_ref(rbo);
5409
5410         if (dm_plane_state_new->dc_state &&
5411                         dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
5412                 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
5413
5414                 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5415                 fill_plane_buffer_attributes(
5416                         adev, afb, plane_state->format, plane_state->rotation,
5417                         tiling_flags, &plane_state->tiling_info,
5418                         &plane_state->plane_size, &plane_state->dcc,
5419                         &plane_state->address, tmz_surface,
5420                         force_disable_dcc);
5421         }
5422
5423         return 0;
5424 }
5425
5426 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
5427                                        struct drm_plane_state *old_state)
5428 {
5429         struct amdgpu_bo *rbo;
5430         int r;
5431
5432         if (!old_state->fb)
5433                 return;
5434
5435         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
5436         r = amdgpu_bo_reserve(rbo, false);
5437         if (unlikely(r)) {
5438                 DRM_ERROR("failed to reserve rbo before unpin\n");
5439                 return;
5440         }
5441
5442         amdgpu_bo_unpin(rbo);
5443         amdgpu_bo_unreserve(rbo);
5444         amdgpu_bo_unref(&rbo);
5445 }
5446
5447 static int dm_plane_atomic_check(struct drm_plane *plane,
5448                                  struct drm_plane_state *state)
5449 {
5450         struct amdgpu_device *adev = plane->dev->dev_private;
5451         struct dc *dc = adev->dm.dc;
5452         struct dm_plane_state *dm_plane_state;
5453         struct dc_scaling_info scaling_info;
5454         int ret;
5455
5456         dm_plane_state = to_dm_plane_state(state);
5457
5458         if (!dm_plane_state->dc_state)
5459                 return 0;
5460
5461         ret = fill_dc_scaling_info(state, &scaling_info);
5462         if (ret)
5463                 return ret;
5464
5465         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
5466                 return 0;
5467
5468         return -EINVAL;
5469 }
5470
5471 static int dm_plane_atomic_async_check(struct drm_plane *plane,
5472                                        struct drm_plane_state *new_plane_state)
5473 {
5474         /* Only support async updates on cursor planes. */
5475         if (plane->type != DRM_PLANE_TYPE_CURSOR)
5476                 return -EINVAL;
5477
5478         return 0;
5479 }
5480
5481 static void dm_plane_atomic_async_update(struct drm_plane *plane,
5482                                          struct drm_plane_state *new_state)
5483 {
5484         struct drm_plane_state *old_state =
5485                 drm_atomic_get_old_plane_state(new_state->state, plane);
5486
5487         swap(plane->state->fb, new_state->fb);
5488
5489         plane->state->src_x = new_state->src_x;
5490         plane->state->src_y = new_state->src_y;
5491         plane->state->src_w = new_state->src_w;
5492         plane->state->src_h = new_state->src_h;
5493         plane->state->crtc_x = new_state->crtc_x;
5494         plane->state->crtc_y = new_state->crtc_y;
5495         plane->state->crtc_w = new_state->crtc_w;
5496         plane->state->crtc_h = new_state->crtc_h;
5497
5498         handle_cursor_update(plane, old_state);
5499 }
5500
5501 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
5502         .prepare_fb = dm_plane_helper_prepare_fb,
5503         .cleanup_fb = dm_plane_helper_cleanup_fb,
5504         .atomic_check = dm_plane_atomic_check,
5505         .atomic_async_check = dm_plane_atomic_async_check,
5506         .atomic_async_update = dm_plane_atomic_async_update
5507 };
5508
5509 /*
5510  * TODO: these are currently initialized to rgb formats only.
5511  * For future use cases we should either initialize them dynamically based on
5512  * plane capabilities, or initialize this array to all formats, so internal drm
5513  * check will succeed, and let DC implement proper check
5514  */
5515 static const uint32_t rgb_formats[] = {
5516         DRM_FORMAT_XRGB8888,
5517         DRM_FORMAT_ARGB8888,
5518         DRM_FORMAT_RGBA8888,
5519         DRM_FORMAT_XRGB2101010,
5520         DRM_FORMAT_XBGR2101010,
5521         DRM_FORMAT_ARGB2101010,
5522         DRM_FORMAT_ABGR2101010,
5523         DRM_FORMAT_XBGR8888,
5524         DRM_FORMAT_ABGR8888,
5525         DRM_FORMAT_RGB565,
5526 };
5527
5528 static const uint32_t overlay_formats[] = {
5529         DRM_FORMAT_XRGB8888,
5530         DRM_FORMAT_ARGB8888,
5531         DRM_FORMAT_RGBA8888,
5532         DRM_FORMAT_XBGR8888,
5533         DRM_FORMAT_ABGR8888,
5534         DRM_FORMAT_RGB565
5535 };
5536
5537 static const u32 cursor_formats[] = {
5538         DRM_FORMAT_ARGB8888
5539 };
5540
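/*
 * Fill @formats with the DRM formats supported by the plane type, gating
 * the YUV and FP16 entries on the DC plane caps. Returns the number of
 * formats written, capped at @max_formats.
 */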
5541 static int get_plane_formats(const struct drm_plane *plane,
5542                              const struct dc_plane_cap *plane_cap,
5543                              uint32_t *formats, int max_formats)
5544 {
5545         int i, num_formats = 0;
5546
5547         /*
5548          * TODO: Query support for each group of formats directly from
5549          * DC plane caps. This will require adding more formats to the
5550          * caps list.
5551          */
5552
5553         switch (plane->type) {
5554         case DRM_PLANE_TYPE_PRIMARY:
5555                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
5556                         if (num_formats >= max_formats)
5557                                 break;
5558
5559                         formats[num_formats++] = rgb_formats[i];
5560                 }
5561
5562                 if (plane_cap && plane_cap->pixel_format_support.nv12)
5563                         formats[num_formats++] = DRM_FORMAT_NV12;
5564                 if (plane_cap && plane_cap->pixel_format_support.p010)
5565                         formats[num_formats++] = DRM_FORMAT_P010;
5566                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
5567                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
5568                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
5569                 }
5570                 break;
5571
5572         case DRM_PLANE_TYPE_OVERLAY:
5573                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
5574                         if (num_formats >= max_formats)
5575                                 break;
5576
5577                         formats[num_formats++] = overlay_formats[i];
5578                 }
5579                 break;
5580
5581         case DRM_PLANE_TYPE_CURSOR:
5582                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
5583                         if (num_formats >= max_formats)
5584                                 break;
5585
5586                         formats[num_formats++] = cursor_formats[i];
5587                 }
5588                 break;
5589         }
5590
5591         return num_formats;
5592 }
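
/*
 * Worked example (assuming a DCN plane whose caps report NV12, P010 and
 * FP16 support): a primary plane would return the 10 RGB formats above
 * plus DRM_FORMAT_NV12, DRM_FORMAT_P010, DRM_FORMAT_XRGB16161616F and
 * DRM_FORMAT_ARGB16161616F, i.e. num_formats == 14. This comfortably fits
 * the 32-entry buffer that amdgpu_dm_plane_init() passes in below.
 */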
5593
5594 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
5595                                 struct drm_plane *plane,
5596                                 unsigned long possible_crtcs,
5597                                 const struct dc_plane_cap *plane_cap)
5598 {
5599         uint32_t formats[32];
5600         int num_formats;
5601         int res = -EPERM;
        int res;
5603         num_formats = get_plane_formats(plane, plane_cap, formats,
5604                                         ARRAY_SIZE(formats));
5605
5606         res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
5607                                        &dm_plane_funcs, formats, num_formats,
5608                                        NULL, plane->type, NULL);
5609         if (res)
5610                 return res;
5611
5612         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
5613             plane_cap && plane_cap->per_pixel_alpha) {
5614                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
5615                                           BIT(DRM_MODE_BLEND_PREMULTI);
5616
5617                 drm_plane_create_alpha_property(plane);
5618                 drm_plane_create_blend_mode_property(plane, blend_caps);
5619         }
5620
5621         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
5622             plane_cap &&
5623             (plane_cap->pixel_format_support.nv12 ||
5624              plane_cap->pixel_format_support.p010)) {
5625                 /* This only affects YUV formats. */
5626                 drm_plane_create_color_properties(
5627                         plane,
5628                         BIT(DRM_COLOR_YCBCR_BT601) |
5629                         BIT(DRM_COLOR_YCBCR_BT709) |
5630                         BIT(DRM_COLOR_YCBCR_BT2020),
5631                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
5632                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
5633                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
5634         }
5635
5636         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
5637
5638         /* Create (reset) the plane state */
5639         if (plane->funcs->reset)
5640                 plane->funcs->reset(plane);
5641
5642         return 0;
5643 }
5644
5645 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
5646                                struct drm_plane *plane,
5647                                uint32_t crtc_index)
5648 {
5649         struct amdgpu_crtc *acrtc = NULL;
5650         struct drm_plane *cursor_plane;
5651
5652         int res = -ENOMEM;
5653
5654         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
5655         if (!cursor_plane)
5656                 goto fail;
5657
5658         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
        if (!acrtc) {
                res = -ENOMEM;
                goto fail;
        }
5664
5665         res = drm_crtc_init_with_planes(
5666                         dm->ddev,
5667                         &acrtc->base,
5668                         plane,
5669                         cursor_plane,
5670                         &amdgpu_dm_crtc_funcs, NULL);
5671
5672         if (res)
5673                 goto fail;
5674
5675         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
5676
        /* Create (reset) the crtc state */
5678         if (acrtc->base.funcs->reset)
5679                 acrtc->base.funcs->reset(&acrtc->base);
5680
5681         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
5682         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
5683
5684         acrtc->crtc_id = crtc_index;
5685         acrtc->base.enabled = false;
5686         acrtc->otg_inst = -1;
5687
5688         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
5689         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
5690                                    true, MAX_COLOR_LUT_ENTRIES);
5691         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
5692
5693         return 0;
5694
5695 fail:
5696         kfree(acrtc);
5697         kfree(cursor_plane);
5698         return res;
5699 }
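
/*
 * Summary of the wiring above (descriptive only): each amdgpu_crtc pairs
 * the caller-provided primary plane with its own dedicated cursor plane,
 * enables non-legacy color management (MAX_COLOR_LUT_ENTRIES-sized degamma
 * and gamma LUTs plus a CTM), and exposes a legacy gamma ramp of
 * MAX_COLOR_LEGACY_LUT_ENTRIES entries for older userspace.
 */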
5700
5701
5702 static int to_drm_connector_type(enum signal_type st)
5703 {
5704         switch (st) {
5705         case SIGNAL_TYPE_HDMI_TYPE_A:
5706                 return DRM_MODE_CONNECTOR_HDMIA;
5707         case SIGNAL_TYPE_EDP:
5708                 return DRM_MODE_CONNECTOR_eDP;
5709         case SIGNAL_TYPE_LVDS:
5710                 return DRM_MODE_CONNECTOR_LVDS;
5711         case SIGNAL_TYPE_RGB:
5712                 return DRM_MODE_CONNECTOR_VGA;
5713         case SIGNAL_TYPE_DISPLAY_PORT:
5714         case SIGNAL_TYPE_DISPLAY_PORT_MST:
5715                 return DRM_MODE_CONNECTOR_DisplayPort;
5716         case SIGNAL_TYPE_DVI_DUAL_LINK:
5717         case SIGNAL_TYPE_DVI_SINGLE_LINK:
5718                 return DRM_MODE_CONNECTOR_DVID;
5719         case SIGNAL_TYPE_VIRTUAL:
5720                 return DRM_MODE_CONNECTOR_VIRTUAL;
5721
5722         default:
5723                 return DRM_MODE_CONNECTOR_Unknown;
5724         }
5725 }
5726
5727 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
5728 {
5729         struct drm_encoder *encoder;
5730
5731         /* There is only one encoder per connector */
5732         drm_connector_for_each_possible_encoder(connector, encoder)
5733                 return encoder;
5734
5735         return NULL;
5736 }
5737
5738 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
5739 {
5740         struct drm_encoder *encoder;
5741         struct amdgpu_encoder *amdgpu_encoder;
5742
5743         encoder = amdgpu_dm_connector_to_encoder(connector);
5744
5745         if (encoder == NULL)
5746                 return;
5747
5748         amdgpu_encoder = to_amdgpu_encoder(encoder);
5749
5750         amdgpu_encoder->native_mode.clock = 0;
5751
5752         if (!list_empty(&connector->probed_modes)) {
5753                 struct drm_display_mode *preferred_mode = NULL;
5754
5755                 list_for_each_entry(preferred_mode,
5756                                     &connector->probed_modes,
5757                                     head) {
5758                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
5759                                 amdgpu_encoder->native_mode = *preferred_mode;
5760
5761                         break;
5762                 }
5763
5764         }
5765 }
5766
5767 static struct drm_display_mode *
5768 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
5769                              char *name,
5770                              int hdisplay, int vdisplay)
5771 {
5772         struct drm_device *dev = encoder->dev;
5773         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5774         struct drm_display_mode *mode = NULL;
5775         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5776
5777         mode = drm_mode_duplicate(dev, native_mode);
5778
5779         if (mode == NULL)
5780                 return NULL;
5781
5782         mode->hdisplay = hdisplay;
5783         mode->vdisplay = vdisplay;
5784         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
5785         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
5786
5787         return mode;
5788
5789 }
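
/*
 * Note (illustrative): the common mode is a duplicate of the native mode
 * with only hdisplay/vdisplay, the name and the PREFERRED flag changed, so
 * the pixel clock and blanking totals are inherited from the native timing;
 * displaying e.g. a "1280x720" common mode on a 1920x1080 panel relies on
 * the scaler configured via the connector's scaling property.
 */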
5790
5791 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
5792                                                  struct drm_connector *connector)
5793 {
5794         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
5795         struct drm_display_mode *mode = NULL;
5796         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
5797         struct amdgpu_dm_connector *amdgpu_dm_connector =
5798                                 to_amdgpu_dm_connector(connector);
5799         int i;
5800         int n;
5801         struct mode_size {
5802                 char name[DRM_DISPLAY_MODE_LEN];
5803                 int w;
5804                 int h;
5805         } common_modes[] = {
5806                 {  "640x480",  640,  480},
5807                 {  "800x600",  800,  600},
5808                 { "1024x768", 1024,  768},
5809                 { "1280x720", 1280,  720},
5810                 { "1280x800", 1280,  800},
5811                 {"1280x1024", 1280, 1024},
5812                 { "1440x900", 1440,  900},
5813                 {"1680x1050", 1680, 1050},
5814                 {"1600x1200", 1600, 1200},
5815                 {"1920x1080", 1920, 1080},
5816                 {"1920x1200", 1920, 1200}
5817         };
5818
5819         n = ARRAY_SIZE(common_modes);
5820
5821         for (i = 0; i < n; i++) {
5822                 struct drm_display_mode *curmode = NULL;
5823                 bool mode_existed = false;
5824
5825                 if (common_modes[i].w > native_mode->hdisplay ||
5826                     common_modes[i].h > native_mode->vdisplay ||
5827                    (common_modes[i].w == native_mode->hdisplay &&
5828                     common_modes[i].h == native_mode->vdisplay))
5829                         continue;
5830
5831                 list_for_each_entry(curmode, &connector->probed_modes, head) {
5832                         if (common_modes[i].w == curmode->hdisplay &&
5833                             common_modes[i].h == curmode->vdisplay) {
5834                                 mode_existed = true;
5835                                 break;
5836                         }
5837                 }
5838
5839                 if (mode_existed)
5840                         continue;
5841
                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
                if (!mode)
                        continue;

                drm_mode_probed_add(connector, mode);
5846                 amdgpu_dm_connector->num_modes++;
5847         }
5848 }
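
/*
 * Worked example (assuming a 1920x1080 native mode and no matching entries
 * already in probed_modes): the loop above skips 1920x1080 (equal to the
 * native size) and 1600x1200/1920x1200 (taller than the native mode), and
 * adds the remaining eight smaller common_modes[] entries.
 */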
5849
5850 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
5851                                               struct edid *edid)
5852 {
5853         struct amdgpu_dm_connector *amdgpu_dm_connector =
5854                         to_amdgpu_dm_connector(connector);
5855
5856         if (edid) {
5857                 /* empty probed_modes */
5858                 INIT_LIST_HEAD(&connector->probed_modes);
5859                 amdgpu_dm_connector->num_modes =
5860                                 drm_add_edid_modes(connector, edid);
5861
                /*
                 * Sort the probed modes before calling
                 * amdgpu_dm_get_native_mode(), since an EDID can contain
                 * more than one preferred mode. Modes later in the probed
                 * list may have a higher preferred resolution: for example,
                 * a 3840x2160 preferred timing in the base EDID and a
                 * 4096x2160 preferred resolution in a later DID extension
                 * block.
                 */
5870                 drm_mode_sort(&connector->probed_modes);
5871                 amdgpu_dm_get_native_mode(connector);
5872         } else {
5873                 amdgpu_dm_connector->num_modes = 0;
5874         }
5875 }
5876
5877 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
5878 {
5879         struct amdgpu_dm_connector *amdgpu_dm_connector =
5880                         to_amdgpu_dm_connector(connector);
5881         struct drm_encoder *encoder;
5882         struct edid *edid = amdgpu_dm_connector->edid;
5883
5884         encoder = amdgpu_dm_connector_to_encoder(connector);
5885
5886         if (!edid || !drm_edid_is_valid(edid)) {
5887                 amdgpu_dm_connector->num_modes =
5888                                 drm_add_modes_noedid(connector, 640, 480);
5889         } else {
5890                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
5891                 amdgpu_dm_connector_add_common_modes(encoder, connector);
5892         }
5893         amdgpu_dm_fbc_init(connector);
5894
5895         return amdgpu_dm_connector->num_modes;
5896 }
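
/*
 * Fallback behavior (descriptive): without an EDID, or with one that fails
 * drm_edid_is_valid(), only the standard modes up to 640x480 from
 * drm_add_modes_noedid() are reported and no common modes are added.
 */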
5897
5898 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
5899                                      struct amdgpu_dm_connector *aconnector,
5900                                      int connector_type,
5901                                      struct dc_link *link,
5902                                      int link_index)
5903 {
5904         struct amdgpu_device *adev = dm->ddev->dev_private;
5905
5906         /*
5907          * Some of the properties below require access to state, like bpc.
5908          * Allocate some default initial connector state with our reset helper.
5909          */
5910         if (aconnector->base.funcs->reset)
5911                 aconnector->base.funcs->reset(&aconnector->base);
5912
5913         aconnector->connector_id = link_index;
5914         aconnector->dc_link = link;
5915         aconnector->base.interlace_allowed = false;
5916         aconnector->base.doublescan_allowed = false;
5917         aconnector->base.stereo_allowed = false;
5918         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
5919         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
5920         aconnector->audio_inst = -1;
5921         mutex_init(&aconnector->hpd_lock);
5922
        /*
         * Configure HPD hot plug support. The default connector->polled
         * value is 0, which means HPD hot plug is not supported.
         */
5927         switch (connector_type) {
5928         case DRM_MODE_CONNECTOR_HDMIA:
5929                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5930                 aconnector->base.ycbcr_420_allowed =
5931                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
5932                 break;
5933         case DRM_MODE_CONNECTOR_DisplayPort:
5934                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5935                 aconnector->base.ycbcr_420_allowed =
5936                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
5937                 break;
5938         case DRM_MODE_CONNECTOR_DVID:
5939                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
5940                 break;
5941         default:
5942                 break;
5943         }
5944
5945         drm_object_attach_property(&aconnector->base.base,
5946                                 dm->ddev->mode_config.scaling_mode_property,
5947                                 DRM_MODE_SCALE_NONE);
5948
5949         drm_object_attach_property(&aconnector->base.base,
5950                                 adev->mode_info.underscan_property,
5951                                 UNDERSCAN_OFF);
5952         drm_object_attach_property(&aconnector->base.base,
5953                                 adev->mode_info.underscan_hborder_property,
5954                                 0);
5955         drm_object_attach_property(&aconnector->base.base,
5956                                 adev->mode_info.underscan_vborder_property,
5957                                 0);
5958
5959         if (!aconnector->mst_port)
5960                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
5961
5962         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
5963         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
5964         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
5965
5966         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
5967             dc_is_dmcu_initialized(adev->dm.dc)) {
5968                 drm_object_attach_property(&aconnector->base.base,
5969                                 adev->mode_info.abm_level_property, 0);
5970         }
5971
5972         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5973             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5974             connector_type == DRM_MODE_CONNECTOR_eDP) {
5975                 drm_object_attach_property(
5976                         &aconnector->base.base,
5977                         dm->ddev->mode_config.hdr_output_metadata_property, 0);
5978
5979                 if (!aconnector->mst_port)
5980                         drm_connector_attach_vrr_capable_property(&aconnector->base);
5981
5982 #ifdef CONFIG_DRM_AMD_DC_HDCP
5983                 if (adev->dm.hdcp_workqueue)
5984                         drm_connector_attach_content_protection_property(&aconnector->base, true);
5985 #endif
5986         }
5987 }
5988
5989 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
5990                               struct i2c_msg *msgs, int num)
5991 {
5992         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
5993         struct ddc_service *ddc_service = i2c->ddc_service;
5994         struct i2c_command cmd;
5995         int i;
5996         int result = -EIO;
5997
5998         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
5999
6000         if (!cmd.payloads)
6001                 return result;
6002
6003         cmd.number_of_payloads = num;
6004         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
6005         cmd.speed = 100;
6006
6007         for (i = 0; i < num; i++) {
6008                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6009                 cmd.payloads[i].address = msgs[i].addr;
6010                 cmd.payloads[i].length = msgs[i].len;
6011                 cmd.payloads[i].data = msgs[i].buf;
6012         }
6013
6014         if (dc_submit_i2c(
6015                         ddc_service->ctx->dc,
6016                         ddc_service->ddc_pin->hw_info.ddc_channel,
6017                         &cmd))
6018                 result = num;
6019
6020         kfree(cmd.payloads);
6021         return result;
6022 }
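
/*
 * Worked example (illustrative): a typical EDID read from userspace arrives
 * as two i2c_msgs - a write of the 0x00 offset to address 0x50 followed by
 * a 128-byte read - which the loop above turns into two i2c_payloads:
 *
 *     payloads[0] = { .write = true,  .address = 0x50, .length = 1   };
 *     payloads[1] = { .write = false, .address = 0x50, .length = 128 };
 *
 * dc_submit_i2c() then executes both payloads as one transaction on the
 * link's DDC channel.
 */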
6023
6024 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6025 {
6026         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
6027 }
6028
6029 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6030         .master_xfer = amdgpu_dm_i2c_xfer,
6031         .functionality = amdgpu_dm_i2c_func,
6032 };
6033
6034 static struct amdgpu_i2c_adapter *
6035 create_i2c(struct ddc_service *ddc_service,
6036            int link_index,
6037            int *res)
6038 {
6039         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6040         struct amdgpu_i2c_adapter *i2c;
6041
6042         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6043         if (!i2c)
6044                 return NULL;
6045         i2c->base.owner = THIS_MODULE;
6046         i2c->base.class = I2C_CLASS_DDC;
6047         i2c->base.dev.parent = &adev->pdev->dev;
6048         i2c->base.algo = &amdgpu_dm_i2c_algo;
6049         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6050         i2c_set_adapdata(&i2c->base, i2c);
6051         i2c->ddc_service = ddc_service;
6052         i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6053
6054         return i2c;
6055 }
6056
6057
6058 /*
6059  * Note: this function assumes that dc_link_detect() was called for the
6060  * dc_link which will be represented by this aconnector.
6061  */
6062 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6063                                     struct amdgpu_dm_connector *aconnector,
6064                                     uint32_t link_index,
6065                                     struct amdgpu_encoder *aencoder)
6066 {
6067         int res = 0;
6068         int connector_type;
6069         struct dc *dc = dm->dc;
6070         struct dc_link *link = dc_get_link_at_index(dc, link_index);
6071         struct amdgpu_i2c_adapter *i2c;
6072
6073         link->priv = aconnector;
6074
6075         DRM_DEBUG_DRIVER("%s()\n", __func__);
6076
6077         i2c = create_i2c(link->ddc, link->link_index, &res);
6078         if (!i2c) {
6079                 DRM_ERROR("Failed to create i2c adapter data\n");
6080                 return -ENOMEM;
6081         }
6082
6083         aconnector->i2c = i2c;
6084         res = i2c_add_adapter(&i2c->base);
6085
6086         if (res) {
6087                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6088                 goto out_free;
6089         }
6090
6091         connector_type = to_drm_connector_type(link->connector_signal);
6092
6093         res = drm_connector_init_with_ddc(
6094                         dm->ddev,
6095                         &aconnector->base,
6096                         &amdgpu_dm_connector_funcs,
6097                         connector_type,
6098                         &i2c->base);
6099
6100         if (res) {
6101                 DRM_ERROR("connector_init failed\n");
6102                 aconnector->connector_id = -1;
6103                 goto out_free;
6104         }
6105
6106         drm_connector_helper_add(
6107                         &aconnector->base,
6108                         &amdgpu_dm_connector_helper_funcs);
6109
6110         amdgpu_dm_connector_init_helper(
6111                 dm,
6112                 aconnector,
6113                 connector_type,
6114                 link,
6115                 link_index);
6116
6117         drm_connector_attach_encoder(
6118                 &aconnector->base, &aencoder->base);
6119
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP)
6122                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6123
6124 out_free:
6125         if (res) {
6126                 kfree(i2c);
6127                 aconnector->i2c = NULL;
6128         }
6129         return res;
6130 }
6131
6132 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6133 {
6134         switch (adev->mode_info.num_crtc) {
6135         case 1:
6136                 return 0x1;
6137         case 2:
6138                 return 0x3;
6139         case 3:
6140                 return 0x7;
6141         case 4:
6142                 return 0xf;
6143         case 5:
6144                 return 0x1f;
6145         case 6:
6146         default:
6147                 return 0x3f;
6148         }
6149 }
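
/*
 * Equivalently (illustrative only), for the supported 1-6 CRTC range the
 * switch above computes one bit per CRTC, capped at six:
 *
 *     return (1 << min(adev->mode_info.num_crtc, 6)) - 1;
 */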
6150
6151 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6152                                   struct amdgpu_encoder *aencoder,
6153                                   uint32_t link_index)
6154 {
6155         struct amdgpu_device *adev = dev->dev_private;
6156
6157         int res = drm_encoder_init(dev,
6158                                    &aencoder->base,
6159                                    &amdgpu_dm_encoder_funcs,
6160                                    DRM_MODE_ENCODER_TMDS,
6161                                    NULL);
6162
6163         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6164
6165         if (!res)
6166                 aencoder->encoder_id = link_index;
6167         else
6168                 aencoder->encoder_id = -1;
6169
6170         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6171
6172         return res;
6173 }
6174
6175 static void manage_dm_interrupts(struct amdgpu_device *adev,
6176                                  struct amdgpu_crtc *acrtc,
6177                                  bool enable)
6178 {
        /*
         * This is not a correct translation, but it works as long as the
         * VBLANK constant is the same as the PFLIP one.
         */
6183         int irq_type =
6184                 amdgpu_display_crtc_idx_to_irq_type(
6185                         adev,
6186                         acrtc->crtc_id);
6187
6188         if (enable) {
6189                 drm_crtc_vblank_on(&acrtc->base);
6190                 amdgpu_irq_get(
6191                         adev,
6192                         &adev->pageflip_irq,
6193                         irq_type);
6194         } else {
6195
6196                 amdgpu_irq_put(
6197                         adev,
6198                         &adev->pageflip_irq,
6199                         irq_type);
6200                 drm_crtc_vblank_off(&acrtc->base);
6201         }
6202 }
6203
6204 static bool
6205 is_scaling_state_different(const struct dm_connector_state *dm_state,
6206                            const struct dm_connector_state *old_dm_state)
6207 {
6208         if (dm_state->scaling != old_dm_state->scaling)
6209                 return true;
6210         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6211                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6212                         return true;
6213         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6214                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6215                         return true;
6216         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6217                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6218                 return true;
6219         return false;
6220 }
6221
6222 #ifdef CONFIG_DRM_AMD_DC_HDCP
6223 static bool is_content_protection_different(struct drm_connector_state *state,
6224                                             const struct drm_connector_state *old_state,
6225                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6226 {
6227         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6228
6229         if (old_state->hdcp_content_type != state->hdcp_content_type &&
6230             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6231                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6232                 return true;
6233         }
6234
        /* CP is being re-enabled, ignore this */
6236         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6237             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6238                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6239                 return false;
6240         }
6241
6242         /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6243         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6244             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6245                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6246
        /*
         * Check if something is connected and enabled; otherwise we would
         * start HDCP with nothing connected/enabled (hot-plug, headless S3,
         * DPMS).
         */
6250         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6251             aconnector->dc_sink != NULL)
6252                 return true;
6253
6254         if (old_state->content_protection == state->content_protection)
6255                 return false;
6256
6257         if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6258                 return true;
6259
6260         return false;
6261 }
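
/*
 * Decision summary (descriptive, derived from the checks above): HDCP is
 * (re)started when the content type changes, when DESIRED is requested with
 * an active sink and DPMS on, or when protection is being torn down
 * (-> UNDESIRED); it is left alone for the ENABLED -> DESIRED re-enable
 * case and whenever the requested state is unchanged.
 */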
6262
6263 #endif
6264 static void remove_stream(struct amdgpu_device *adev,
6265                           struct amdgpu_crtc *acrtc,
6266                           struct dc_stream_state *stream)
6267 {
6268         /* this is the update mode case */
6269
6270         acrtc->otg_inst = -1;
6271         acrtc->enabled = false;
6272 }
6273
6274 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6275                                struct dc_cursor_position *position)
6276 {
6277         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6278         int x, y;
6279         int xorigin = 0, yorigin = 0;
6280
6281         position->enable = false;
6282         position->x = 0;
6283         position->y = 0;
6284
6285         if (!crtc || !plane->state->fb)
6286                 return 0;
6287
6288         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6289             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6290                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6291                           __func__,
6292                           plane->state->crtc_w,
6293                           plane->state->crtc_h);
6294                 return -EINVAL;
6295         }
6296
6297         x = plane->state->crtc_x;
6298         y = plane->state->crtc_y;
6299
6300         if (x <= -amdgpu_crtc->max_cursor_width ||
6301             y <= -amdgpu_crtc->max_cursor_height)
6302                 return 0;
6303
6304         if (x < 0) {
6305                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6306                 x = 0;
6307         }
6308         if (y < 0) {
6309                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6310                 y = 0;
6311         }
6312         position->enable = true;
6313         position->translate_by_source = true;
6314         position->x = x;
6315         position->y = y;
6316         position->x_hotspot = xorigin;
6317         position->y_hotspot = yorigin;
6318
6319         return 0;
6320 }
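
/*
 * Clamping example (illustrative): a cursor at crtc_x = -10 becomes
 * position->x = 0 with x_hotspot = 10, so the bitmap is shifted left by 10
 * pixels and only the visible part is scanned out. A cursor entirely off
 * the left/top edge leaves position->enable == false.
 */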
6321
6322 static void handle_cursor_update(struct drm_plane *plane,
6323                                  struct drm_plane_state *old_plane_state)
6324 {
6325         struct amdgpu_device *adev = plane->dev->dev_private;
6326         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6327         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6328         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6329         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6330         uint64_t address = afb ? afb->address : 0;
6331         struct dc_cursor_position position;
6332         struct dc_cursor_attributes attributes;
6333         int ret;
6334
6335         if (!plane->state->fb && !old_plane_state->fb)
6336                 return;
6337
        DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %dx%d\n",
6339                          __func__,
6340                          amdgpu_crtc->crtc_id,
6341                          plane->state->crtc_w,
6342                          plane->state->crtc_h);
6343
6344         ret = get_cursor_position(plane, crtc, &position);
6345         if (ret)
6346                 return;
6347
6348         if (!position.enable) {
6349                 /* turn off cursor */
6350                 if (crtc_state && crtc_state->stream) {
6351                         mutex_lock(&adev->dm.dc_lock);
6352                         dc_stream_set_cursor_position(crtc_state->stream,
6353                                                       &position);
6354                         mutex_unlock(&adev->dm.dc_lock);
6355                 }
6356                 return;
6357         }
6358
6359         amdgpu_crtc->cursor_width = plane->state->crtc_w;
6360         amdgpu_crtc->cursor_height = plane->state->crtc_h;
6361
6362         memset(&attributes, 0, sizeof(attributes));
6363         attributes.address.high_part = upper_32_bits(address);
6364         attributes.address.low_part  = lower_32_bits(address);
6365         attributes.width             = plane->state->crtc_w;
6366         attributes.height            = plane->state->crtc_h;
6367         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6368         attributes.rotation_angle    = 0;
6369         attributes.attribute_flags.value = 0;
6370
6371         attributes.pitch = attributes.width;
6372
6373         if (crtc_state->stream) {
6374                 mutex_lock(&adev->dm.dc_lock);
6375                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6376                                                          &attributes))
6377                         DRM_ERROR("DC failed to set cursor attributes\n");
6378
6379                 if (!dc_stream_set_cursor_position(crtc_state->stream,
6380                                                    &position))
6381                         DRM_ERROR("DC failed to set cursor position\n");
6382                 mutex_unlock(&adev->dm.dc_lock);
6383         }
6384 }
6385
6386 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6387 {
6388
6389         assert_spin_locked(&acrtc->base.dev->event_lock);
6390         WARN_ON(acrtc->event);
6391
6392         acrtc->event = acrtc->base.state->event;
6393
6394         /* Set the flip status */
6395         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6396
6397         /* Mark this event as consumed */
6398         acrtc->base.state->event = NULL;
6399
6400         DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6401                                                  acrtc->crtc_id);
6402 }
6403
6404 static void update_freesync_state_on_stream(
6405         struct amdgpu_display_manager *dm,
6406         struct dm_crtc_state *new_crtc_state,
6407         struct dc_stream_state *new_stream,
6408         struct dc_plane_state *surface,
6409         u32 flip_timestamp_in_us)
6410 {
6411         struct mod_vrr_params vrr_params;
6412         struct dc_info_packet vrr_infopacket = {0};
6413         struct amdgpu_device *adev = dm->adev;
6414         unsigned long flags;
6415
6416         if (!new_stream)
6417                 return;
6418
6419         /*
6420          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6421          * For now it's sufficient to just guard against these conditions.
6422          */
6423
6424         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6425                 return;
6426
6427         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6428         vrr_params = new_crtc_state->vrr_params;
6429
6430         if (surface) {
6431                 mod_freesync_handle_preflip(
6432                         dm->freesync_module,
6433                         surface,
6434                         new_stream,
6435                         flip_timestamp_in_us,
6436                         &vrr_params);
6437
6438                 if (adev->family < AMDGPU_FAMILY_AI &&
6439                     amdgpu_dm_vrr_active(new_crtc_state)) {
6440                         mod_freesync_handle_v_update(dm->freesync_module,
6441                                                      new_stream, &vrr_params);
6442
6443                         /* Need to call this before the frame ends. */
6444                         dc_stream_adjust_vmin_vmax(dm->dc,
6445                                                    new_crtc_state->stream,
6446                                                    &vrr_params.adjust);
6447                 }
6448         }
6449
6450         mod_freesync_build_vrr_infopacket(
6451                 dm->freesync_module,
6452                 new_stream,
6453                 &vrr_params,
6454                 PACKET_TYPE_VRR,
6455                 TRANSFER_FUNC_UNKNOWN,
6456                 &vrr_infopacket);
6457
6458         new_crtc_state->freesync_timing_changed |=
6459                 (memcmp(&new_crtc_state->vrr_params.adjust,
6460                         &vrr_params.adjust,
6461                         sizeof(vrr_params.adjust)) != 0);
6462
6463         new_crtc_state->freesync_vrr_info_changed |=
6464                 (memcmp(&new_crtc_state->vrr_infopacket,
6465                         &vrr_infopacket,
6466                         sizeof(vrr_infopacket)) != 0);
6467
6468         new_crtc_state->vrr_params = vrr_params;
6469         new_crtc_state->vrr_infopacket = vrr_infopacket;
6470
6471         new_stream->adjust = new_crtc_state->vrr_params.adjust;
6472         new_stream->vrr_infopacket = vrr_infopacket;
6473
6474         if (new_crtc_state->freesync_vrr_info_changed)
6475                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6476                               new_crtc_state->base.crtc->base.id,
6477                               (int)new_crtc_state->base.vrr_enabled,
6478                               (int)vrr_params.state);
6479
6480         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6481 }
6482
6483 static void pre_update_freesync_state_on_stream(
6484         struct amdgpu_display_manager *dm,
6485         struct dm_crtc_state *new_crtc_state)
6486 {
6487         struct dc_stream_state *new_stream = new_crtc_state->stream;
6488         struct mod_vrr_params vrr_params;
6489         struct mod_freesync_config config = new_crtc_state->freesync_config;
6490         struct amdgpu_device *adev = dm->adev;
6491         unsigned long flags;
6492
6493         if (!new_stream)
6494                 return;
6495
6496         /*
6497          * TODO: Determine why min/max totals and vrefresh can be 0 here.
6498          * For now it's sufficient to just guard against these conditions.
6499          */
6500         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6501                 return;
6502
6503         spin_lock_irqsave(&adev->ddev->event_lock, flags);
6504         vrr_params = new_crtc_state->vrr_params;
6505
6506         if (new_crtc_state->vrr_supported &&
6507             config.min_refresh_in_uhz &&
6508             config.max_refresh_in_uhz) {
6509                 config.state = new_crtc_state->base.vrr_enabled ?
6510                         VRR_STATE_ACTIVE_VARIABLE :
6511                         VRR_STATE_INACTIVE;
6512         } else {
6513                 config.state = VRR_STATE_UNSUPPORTED;
6514         }
6515
6516         mod_freesync_build_vrr_params(dm->freesync_module,
6517                                       new_stream,
6518                                       &config, &vrr_params);
6519
6520         new_crtc_state->freesync_timing_changed |=
6521                 (memcmp(&new_crtc_state->vrr_params.adjust,
6522                         &vrr_params.adjust,
6523                         sizeof(vrr_params.adjust)) != 0);
6524
6525         new_crtc_state->vrr_params = vrr_params;
6526         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
6527 }
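
/*
 * State selection above (descriptive): VRR_STATE_ACTIVE_VARIABLE when the
 * mode is vrr_supported, both refresh bounds are known and userspace has
 * enabled VRR on the CRTC; VRR_STATE_INACTIVE when supported but not
 * enabled; VRR_STATE_UNSUPPORTED otherwise.
 */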
6528
6529 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
6530                                             struct dm_crtc_state *new_state)
6531 {
6532         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
6533         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
6534
6535         if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
                 * While VRR is active, we must not disable the vblank irq, as
                 * a re-enable after a disable would compute bogus vblank/pflip
                 * timestamps if the disable happened inside the display
                 * front porch.
                 *
                 * We also need the vupdate irq for the actual core vblank
                 * handling at the end of vblank.
                 */
6544                 dm_set_vupdate_irq(new_state->base.crtc, true);
6545                 drm_crtc_vblank_get(new_state->base.crtc);
6546                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
6547                                  __func__, new_state->base.crtc->base.id);
6548         } else if (old_vrr_active && !new_vrr_active) {
6549                 /* Transition VRR active -> inactive:
6550                  * Allow vblank irq disable again for fixed refresh rate.
6551                  */
6552                 dm_set_vupdate_irq(new_state->base.crtc, false);
6553                 drm_crtc_vblank_put(new_state->base.crtc);
6554                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
6555                                  __func__, new_state->base.crtc->base.id);
6556         }
6557 }
6558
6559 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
6560 {
6561         struct drm_plane *plane;
6562         struct drm_plane_state *old_plane_state, *new_plane_state;
6563         int i;
6564
6565         /*
6566          * TODO: Make this per-stream so we don't issue redundant updates for
6567          * commits with multiple streams.
6568          */
6569         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
6570                                        new_plane_state, i)
6571                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6572                         handle_cursor_update(plane, old_plane_state);
6573 }
6574
6575 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
6576                                     struct dc_state *dc_state,
6577                                     struct drm_device *dev,
6578                                     struct amdgpu_display_manager *dm,
6579                                     struct drm_crtc *pcrtc,
6580                                     bool wait_for_vblank)
6581 {
6582         uint32_t i;
6583         uint64_t timestamp_ns;
6584         struct drm_plane *plane;
6585         struct drm_plane_state *old_plane_state, *new_plane_state;
6586         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
6587         struct drm_crtc_state *new_pcrtc_state =
6588                         drm_atomic_get_new_crtc_state(state, pcrtc);
6589         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
6590         struct dm_crtc_state *dm_old_crtc_state =
6591                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
6592         int planes_count = 0, vpos, hpos;
6593         long r;
6594         unsigned long flags;
6595         struct amdgpu_bo *abo;
6596         uint64_t tiling_flags;
6597         bool tmz_surface = false;
6598         uint32_t target_vblank, last_flip_vblank;
6599         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
6600         bool pflip_present = false;
6601         struct {
6602                 struct dc_surface_update surface_updates[MAX_SURFACES];
6603                 struct dc_plane_info plane_infos[MAX_SURFACES];
6604                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
6605                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
6606                 struct dc_stream_update stream_update;
6607         } *bundle;
6608
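        /*
         * The bundle holds per-plane update arrays sized MAX_SURFACES, which
         * is too large to keep on the kernel stack; allocate it on the heap
         * (likely the reason for the kzalloc below) and free it in cleanup.
         */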
6609         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
6610
6611         if (!bundle) {
6612                 dm_error("Failed to allocate update bundle\n");
6613                 goto cleanup;
6614         }
6615
6616         /*
6617          * Disable the cursor first if we're disabling all the planes.
6618          * It'll remain on the screen after the planes are re-enabled
6619          * if we don't.
6620          */
6621         if (acrtc_state->active_planes == 0)
6622                 amdgpu_dm_commit_cursors(state);
6623
6624         /* update planes when needed */
6625         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
6626                 struct drm_crtc *crtc = new_plane_state->crtc;
6627                 struct drm_crtc_state *new_crtc_state;
6628                 struct drm_framebuffer *fb = new_plane_state->fb;
6629                 bool plane_needs_flip;
6630                 struct dc_plane_state *dc_plane;
6631                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
6632
6633                 /* Cursor plane is handled after stream updates */
6634                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6635                         continue;
6636
6637                 if (!fb || !crtc || pcrtc != crtc)
6638                         continue;
6639
6640                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
6641                 if (!new_crtc_state->active)
6642                         continue;
6643
6644                 dc_plane = dm_new_plane_state->dc_state;
6645
6646                 bundle->surface_updates[planes_count].surface = dc_plane;
6647                 if (new_pcrtc_state->color_mgmt_changed) {
6648                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
6649                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
6650                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
6651                 }
6652
6653                 fill_dc_scaling_info(new_plane_state,
6654                                      &bundle->scaling_infos[planes_count]);
6655
6656                 bundle->surface_updates[planes_count].scaling_info =
6657                         &bundle->scaling_infos[planes_count];
6658
6659                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
6660
6661                 pflip_present = pflip_present || plane_needs_flip;
6662
6663                 if (!plane_needs_flip) {
6664                         planes_count += 1;
6665                         continue;
6666                 }
6667
6668                 abo = gem_to_amdgpu_bo(fb->obj[0]);
6669
                /*
                 * Wait for all fences on this FB. Do a limited wait to avoid
                 * a deadlock during GPU reset, when this fence will not
                 * signal but we hold the reservation lock for the BO.
                 */
6675                 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
6676                                                         false,
6677                                                         msecs_to_jiffies(5000));
6678                 if (unlikely(r <= 0))
                        DRM_ERROR("Waiting for fences timed out!\n");
6680
                /*
                 * TODO: This might fail, so it is better not to rely on it;
                 * wait explicitly on the fences instead. In general this
                 * should only be called from a blocking commit, as per the
                 * framework helpers.
                 */
6687                 r = amdgpu_bo_reserve(abo, true);
6688                 if (unlikely(r != 0))
6689                         DRM_ERROR("failed to reserve buffer before flip\n");
6690
6691                 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
6692
6693                 tmz_surface = amdgpu_bo_encrypted(abo);
6694
6695                 amdgpu_bo_unreserve(abo);
6696
6697                 fill_dc_plane_info_and_addr(
6698                         dm->adev, new_plane_state, tiling_flags,
6699                         &bundle->plane_infos[planes_count],
6700                         &bundle->flip_addrs[planes_count].address,
6701                         tmz_surface,
6702                         false);
6703
6704                 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
6705                                  new_plane_state->plane->index,
6706                                  bundle->plane_infos[planes_count].dcc.enable);
6707
6708                 bundle->surface_updates[planes_count].plane_info =
6709                         &bundle->plane_infos[planes_count];
6710
                /*
                 * Only allow immediate flips for fast updates that don't
                 * change the FB pitch, DCC state, rotation or mirroring.
                 */
6715                 bundle->flip_addrs[planes_count].flip_immediate =
6716                         crtc->state->async_flip &&
6717                         acrtc_state->update_type == UPDATE_TYPE_FAST;
6718
6719                 timestamp_ns = ktime_get_ns();
6720                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
6721                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
6722                 bundle->surface_updates[planes_count].surface = dc_plane;
6723
6724                 if (!bundle->surface_updates[planes_count].surface) {
6725                         DRM_ERROR("No surface for CRTC: id=%d\n",
6726                                         acrtc_attach->crtc_id);
6727                         continue;
6728                 }
6729
6730                 if (plane == pcrtc->primary)
6731                         update_freesync_state_on_stream(
6732                                 dm,
6733                                 acrtc_state,
6734                                 acrtc_state->stream,
6735                                 dc_plane,
6736                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
6737
6738                 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
6739                                  __func__,
6740                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
6741                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
6742
6743                 planes_count += 1;
6744
6745         }
6746
6747         if (pflip_present) {
6748                 if (!vrr_active) {
6749                         /* Use old throttling in non-vrr fixed refresh rate mode
6750                          * to keep flip scheduling based on target vblank counts
6751                          * working in a backwards compatible way, e.g., for
6752                          * clients using the GLX_OML_sync_control extension or
6753                          * DRI3/Present extension with defined target_msc.
6754                          */
6755                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                } else {
6758                         /* For variable refresh rate mode only:
6759                          * Get vblank of last completed flip to avoid > 1 vrr
6760                          * flips per video frame by use of throttling, but allow
6761                          * flip programming anywhere in the possibly large
6762                          * variable vrr vblank interval for fine-grained flip
6763                          * timing control and more opportunity to avoid stutter
6764                          * on late submission of flips.
6765                          */
6766                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6767                         last_flip_vblank = acrtc_attach->last_flip_vblank;
6768                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6769                 }
6770
6771                 target_vblank = last_flip_vblank + wait_for_vblank;
6772
6773                 /*
6774                  * Wait until we're out of the vertical blank period before the one
6775                  * targeted by the flip
6776                  */
6777                 while ((acrtc_attach->enabled &&
6778                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
6779                                                             0, &vpos, &hpos, NULL,
6780                                                             NULL, &pcrtc->hwmode)
6781                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
6782                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
6783                         (int)(target_vblank -
6784                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
6785                         usleep_range(1000, 1100);
6786                 }
6787
6788                 if (acrtc_attach->base.state->event) {
6789                         drm_crtc_vblank_get(pcrtc);
6790
6791                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6792
6793                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
6794                         prepare_flip_isr(acrtc_attach);
6795
6796                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6797                 }
6798
6799                 if (acrtc_state->stream) {
6800                         if (acrtc_state->freesync_vrr_info_changed)
6801                                 bundle->stream_update.vrr_infopacket =
6802                                         &acrtc_state->stream->vrr_infopacket;
6803                 }
6804         }
6805
6806         /* Update the planes if changed or disable if we don't have any. */
6807         if ((planes_count || acrtc_state->active_planes == 0) &&
6808                 acrtc_state->stream) {
6809                 bundle->stream_update.stream = acrtc_state->stream;
6810                 if (new_pcrtc_state->mode_changed) {
6811                         bundle->stream_update.src = acrtc_state->stream->src;
6812                         bundle->stream_update.dst = acrtc_state->stream->dst;
6813                 }
6814
6815                 if (new_pcrtc_state->color_mgmt_changed) {
6816                         /*
6817                          * TODO: This isn't fully correct since we've actually
6818                          * already modified the stream in place.
6819                          */
6820                         bundle->stream_update.gamut_remap =
6821                                 &acrtc_state->stream->gamut_remap_matrix;
6822                         bundle->stream_update.output_csc_transform =
6823                                 &acrtc_state->stream->csc_color_matrix;
6824                         bundle->stream_update.out_transfer_func =
6825                                 acrtc_state->stream->out_transfer_func;
6826                 }
6827
6828                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
6829                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
6830                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
6831
6832                 /*
6833                  * If FreeSync state on the stream has changed then we need to
6834                  * re-adjust the min/max bounds now that DC doesn't handle this
6835                  * as part of commit.
6836                  */
6837                 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
6838                     amdgpu_dm_vrr_active(acrtc_state)) {
6839                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
6840                         dc_stream_adjust_vmin_vmax(
6841                                 dm->dc, acrtc_state->stream,
6842                                 &acrtc_state->vrr_params.adjust);
6843                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
6844                 }
6845                 mutex_lock(&dm->dc_lock);
6846                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6847                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
6848                         amdgpu_dm_psr_disable(acrtc_state->stream);
6849
6850                 dc_commit_updates_for_stream(dm->dc,
6851                                                      bundle->surface_updates,
6852                                                      planes_count,
6853                                                      acrtc_state->stream,
6854                                                      &bundle->stream_update,
6855                                                      dc_state);
6856
6857                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
6858                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
6859                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
6860                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
6861                 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
6862                                 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
6863                                 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
6864                         amdgpu_dm_psr_enable(acrtc_state->stream);
6865                 }
6866
6867                 mutex_unlock(&dm->dc_lock);
6868         }
6869
6870         /*
6871          * Update cursor state *after* programming all the planes.
6872          * This avoids redundant programming when we're disabling a single
6873          * plane, since the pipes backing it are being disabled anyway.
6874          */
6875         if (acrtc_state->active_planes)
6876                 amdgpu_dm_commit_cursors(state);
6877
6878 cleanup:
6879         kfree(bundle);
6880 }
6881
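/*
 * Walk the connector state changes and call amdgpu_dm_audio_eld_notify()
 * for audio endpoints that went away (removals) and for endpoints that
 * became active (additions). aconnector->audio_inst tracks the DC audio
 * instance for the connector and is updated under adev->dm.audio_lock.
 */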
6882 static void amdgpu_dm_commit_audio(struct drm_device *dev,
6883                                    struct drm_atomic_state *state)
6884 {
6885         struct amdgpu_device *adev = dev->dev_private;
6886         struct amdgpu_dm_connector *aconnector;
6887         struct drm_connector *connector;
6888         struct drm_connector_state *old_con_state, *new_con_state;
6889         struct drm_crtc_state *new_crtc_state;
6890         struct dm_crtc_state *new_dm_crtc_state;
6891         const struct dc_stream_status *status;
6892         int i, inst;
6893
6894         /* Notify audio device removals. */
6895         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6896                 if (old_con_state->crtc != new_con_state->crtc) {
6897                         /* CRTC changes require notification. */
6898                         goto notify;
6899                 }
6900
6901                 if (!new_con_state->crtc)
6902                         continue;
6903
6904                 new_crtc_state = drm_atomic_get_new_crtc_state(
6905                         state, new_con_state->crtc);
6906
6907                 if (!new_crtc_state)
6908                         continue;
6909
6910                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6911                         continue;
6912
6913         notify:
6914                 aconnector = to_amdgpu_dm_connector(connector);
6915
6916                 mutex_lock(&adev->dm.audio_lock);
6917                 inst = aconnector->audio_inst;
6918                 aconnector->audio_inst = -1;
6919                 mutex_unlock(&adev->dm.audio_lock);
6920
6921                 amdgpu_dm_audio_eld_notify(adev, inst);
6922         }
6923
6924         /* Notify audio device additions. */
6925         for_each_new_connector_in_state(state, connector, new_con_state, i) {
6926                 if (!new_con_state->crtc)
6927                         continue;
6928
6929                 new_crtc_state = drm_atomic_get_new_crtc_state(
6930                         state, new_con_state->crtc);
6931
6932                 if (!new_crtc_state)
6933                         continue;
6934
6935                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
6936                         continue;
6937
6938                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
6939                 if (!new_dm_crtc_state->stream)
6940                         continue;
6941
6942                 status = dc_stream_get_status(new_dm_crtc_state->stream);
6943                 if (!status)
6944                         continue;
6945
6946                 aconnector = to_amdgpu_dm_connector(connector);
6947
6948                 mutex_lock(&adev->dm.audio_lock);
6949                 inst = status->audio_inst;
6950                 aconnector->audio_inst = inst;
6951                 mutex_unlock(&adev->dm.audio_lock);
6952
6953                 amdgpu_dm_audio_eld_notify(adev, inst);
6954         }
6955 }
6956
6957 /*
6958  * Enable interrupts on CRTCs that are newly active, have undergone
6959  * a modeset, or have active planes again.
6960  *
6961  * Done in two passes, based on the for_modeset flag:
6962  * Pass 1: For CRTCs going through modeset
6963  * Pass 2: For CRTCs going from 0 to n active planes
6964  *
6965  * Interrupts can only be enabled after the planes are programmed,
6966  * so this requires a two-pass approach since we don't want to
6967  * just defer the interrupts until after commit planes every time.
6968  */
6969 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
6970                                              struct drm_atomic_state *state,
6971                                              bool for_modeset)
6972 {
6973         struct amdgpu_device *adev = dev->dev_private;
6974         struct drm_crtc *crtc;
6975         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
6976         int i;
6977 #ifdef CONFIG_DEBUG_FS
6978         enum amdgpu_dm_pipe_crc_source source;
6979 #endif
6980
6981         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
6982                                       new_crtc_state, i) {
6983                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6984                 struct dm_crtc_state *dm_new_crtc_state =
6985                         to_dm_crtc_state(new_crtc_state);
6986                 struct dm_crtc_state *dm_old_crtc_state =
6987                         to_dm_crtc_state(old_crtc_state);
6988                 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
6989                 bool run_pass;
6990
6991                 run_pass = (for_modeset && modeset) ||
6992                            (!for_modeset && !modeset &&
6993                             !dm_old_crtc_state->interrupts_enabled);
6994
6995                 if (!run_pass)
6996                         continue;
6997
6998                 if (!dm_new_crtc_state->interrupts_enabled)
6999                         continue;
7000
7001                 manage_dm_interrupts(adev, acrtc, true);
7002
7003 #ifdef CONFIG_DEBUG_FS
7004                 /* The stream has changed, so CRC capture needs to be re-enabled. */
7005                 source = dm_new_crtc_state->crc_src;
7006                 if (amdgpu_dm_is_valid_crc_source(source)) {
7007                         amdgpu_dm_crtc_configure_crc_source(
7008                                 crtc, dm_new_crtc_state,
7009                                 dm_new_crtc_state->crc_src);
7010                 }
7011 #endif
7012         }
7013 }
7014
7015 /*
7016  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7017  * @crtc_state: the DRM CRTC state
7018  * @stream_state: the DC stream state.
7019  *
7020  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7021  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7022  */
7023 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7024                                                 struct dc_stream_state *stream_state)
7025 {
7026         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7027 }
7028
7029 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7030                                    struct drm_atomic_state *state,
7031                                    bool nonblock)
7032 {
7033         struct drm_crtc *crtc;
7034         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7035         struct amdgpu_device *adev = dev->dev_private;
7036         int i;
7037
7038         /*
7039          * We evade vblank and pflip interrupts on CRTCs that are undergoing
7040          * a modeset, being disabled, or have no active planes.
7041          *
7042          * It's done in atomic commit rather than commit tail for now since
7043          * some of these interrupt handlers access the current CRTC state and
7044          * potentially the stream pointer itself.
7045          *
7046          * Since the atomic state is swapped within atomic commit and not within
7047          * commit tail, this would lead to the new state (that hasn't been
7048          * committed yet) being accessed from within the handlers.
7049          *
7050          * TODO: Fix this so we can do this in commit tail and not have to block
7051          * in atomic check.
7052          */
7053         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7054                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7055                 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7056                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7057
7058                 if (dm_old_crtc_state->interrupts_enabled &&
7059                     (!dm_new_crtc_state->interrupts_enabled ||
7060                      drm_atomic_crtc_needs_modeset(new_crtc_state)))
7061                         manage_dm_interrupts(adev, acrtc, false);
7062         }
7063         /*
7064          * Add a check here for SoCs that support a hardware cursor plane, to
7065          * unset legacy_cursor_update
7066          */
7067
7068         return drm_atomic_helper_commit(dev, state, nonblock);
7069
7070         /* TODO: Handle EINTR, re-enable IRQ */
7071 }
7072
7073 /**
7074  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7075  * @state: The atomic state to commit
7076  *
7077  * This will tell DC to commit the constructed DC state from atomic_check,
7078  * programming the hardware. Any failure here implies a hardware failure, since
7079  * atomic check should have filtered anything non-kosher.
7080  */
7081 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7082 {
7083         struct drm_device *dev = state->dev;
7084         struct amdgpu_device *adev = dev->dev_private;
7085         struct amdgpu_display_manager *dm = &adev->dm;
7086         struct dm_atomic_state *dm_state;
7087         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7088         uint32_t i, j;
7089         struct drm_crtc *crtc;
7090         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7091         unsigned long flags;
7092         bool wait_for_vblank = true;
7093         struct drm_connector *connector;
7094         struct drm_connector_state *old_con_state, *new_con_state;
7095         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7096         int crtc_disable_count = 0;
7097
7098         drm_atomic_helper_update_legacy_modeset_state(dev, state);
7099
7100         dm_state = dm_atomic_get_new_state(state);
7101         if (dm_state && dm_state->context) {
7102                 dc_state = dm_state->context;
7103         } else {
7104                 /* No state changes, retain current state. */
7105                 dc_state_temp = dc_create_state(dm->dc);
7106                 ASSERT(dc_state_temp);
7107                 dc_state = dc_state_temp;
7108                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7109         }
7110
7111         /* update changed items */
7112         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7113                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7114
7115                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7116                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7117
7118                 DRM_DEBUG_DRIVER(
7119                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7120                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7121                         "connectors_changed:%d\n",
7122                         acrtc->crtc_id,
7123                         new_crtc_state->enable,
7124                         new_crtc_state->active,
7125                         new_crtc_state->planes_changed,
7126                         new_crtc_state->mode_changed,
7127                         new_crtc_state->active_changed,
7128                         new_crtc_state->connectors_changed);
7129
7130                 /* Copy all transient state flags into dc state */
7131                 if (dm_new_crtc_state->stream) {
7132                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7133                                                             dm_new_crtc_state->stream);
7134                 }
7135
7136                 /* Handle the headless hotplug case, updating new_state and
7137                  * aconnector as needed.
7138                  */
7139
7140                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7141
7142                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7143
7144                         if (!dm_new_crtc_state->stream) {
7145                                 /*
7146                                  * This can happen because of issues with the
7147                                  * delivery of userspace notifications.
7148                                  * In that case userspace tries to set a mode
7149                                  * on a display which is in fact disconnected;
7150                                  * dc_sink on the aconnector is NULL here.
7151                                  * We expect a mode reset to come soon.
7152                                  *
7153                                  * This can also happen when an unplug occurs
7154                                  * during the resume sequence.
7155                                  *
7156                                  * In this case, we want to pretend we still
7157                                  * have a sink to keep the pipe running so that
7158                                  * hw state is consistent with the sw state.
7159                                  */
7160                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7161                                                 __func__, acrtc->base.base.id);
7162                                 continue;
7163                         }
7164
7165                         if (dm_old_crtc_state->stream)
7166                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7167
7168                         pm_runtime_get_noresume(dev->dev);
7169
7170                         acrtc->enabled = true;
7171                         acrtc->hw_mode = new_crtc_state->mode;
7172                         crtc->hwmode = new_crtc_state->mode;
7173                 } else if (modereset_required(new_crtc_state)) {
7174                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7175                         /* i.e. reset mode */
7176                         if (dm_old_crtc_state->stream) {
7177                                 if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
7178                                         amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
7179
7180                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7181                         }
7182                 }
7183         } /* for_each_crtc_in_state() */
7184
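        /*
         * Program the constructed DC state to the hardware. Everything up to
         * this point only manipulated software state; dc_commit_state() is
         * where the new global stream configuration actually takes effect.
         */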
7185         if (dc_state) {
7186                 dm_enable_per_frame_crtc_master_sync(dc_state);
7187                 mutex_lock(&dm->dc_lock);
7188                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
7189                 mutex_unlock(&dm->dc_lock);
7190         }
7191
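        /*
         * Cache the OTG (output timing generator) instance backing each
         * committed stream on its amdgpu_crtc, preferring the live stream
         * status and falling back to the status recorded in dc_state.
         */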
7192         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7193                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7194
7195                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7196
7197                 if (dm_new_crtc_state->stream != NULL) {
7198                         const struct dc_stream_status *status =
7199                                         dc_stream_get_status(dm_new_crtc_state->stream);
7200
7201                         if (!status)
7202                                 status = dc_stream_get_status_from_state(dc_state,
7203                                                                          dm_new_crtc_state->stream);
7204
7205                         if (!status)
7206                                 DC_ERR("got no status for stream %p on acrtc %p\n", dm_new_crtc_state->stream, acrtc);
7207                         else
7208                                 acrtc->otg_inst = status->primary_otg_inst;
7209                 }
7210         }
7211 #ifdef CONFIG_DRM_AMD_DC_HDCP
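        /*
         * HDCP: if a protected display just lost its stream, reset HDCP on
         * the link and fall back to DESIRED so it can be re-enabled later;
         * otherwise propagate any content protection change to the HDCP
         * workqueue.
         */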
7212         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7213                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7214                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7215                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7216
7217                 new_crtc_state = NULL;
7218
7219                 if (acrtc)
7220                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7221
7222                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7223
7224                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7225                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7226                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7227                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7228                         continue;
7229                 }
7230
7231                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7232                         hdcp_update_display(
7233                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7234                                 new_con_state->hdcp_content_type,
7235                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7236                                                                                                          : false);
7237         }
7238 #endif
7239
7240         /* Handle connector state changes */
7241         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7242                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7243                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7244                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7245                 struct dc_surface_update dummy_updates[MAX_SURFACES];
7246                 struct dc_stream_update stream_update;
7247                 struct dc_info_packet hdr_packet;
7248                 struct dc_stream_status *status = NULL;
7249                 bool abm_changed, hdr_changed, scaling_changed;
7250
7251                 memset(&dummy_updates, 0, sizeof(dummy_updates));
7252                 memset(&stream_update, 0, sizeof(stream_update));
7253
7254                 if (acrtc) {
7255                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7256                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7257                 }
7258
7259                 /* Skip any modesets/resets */
7260                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7261                         continue;
7262
7263                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7264                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7265
7266                 scaling_changed = is_scaling_state_different(dm_new_con_state,
7267                                                              dm_old_con_state);
7268
7269                 abm_changed = dm_new_crtc_state->abm_level !=
7270                               dm_old_crtc_state->abm_level;
7271
7272                 hdr_changed =
7273                         is_hdr_metadata_different(old_con_state, new_con_state);
7274
7275                 if (!scaling_changed && !abm_changed && !hdr_changed)
7276                         continue;
7277
7278                 stream_update.stream = dm_new_crtc_state->stream;
7279                 if (scaling_changed) {
7280                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7281                                         dm_new_con_state, dm_new_crtc_state->stream);
7282
7283                         stream_update.src = dm_new_crtc_state->stream->src;
7284                         stream_update.dst = dm_new_crtc_state->stream->dst;
7285                 }
7286
7287                 if (abm_changed) {
7288                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7289
7290                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
7291                 }
7292
7293                 if (hdr_changed) {
7294                         fill_hdr_info_packet(new_con_state, &hdr_packet);
7295                         stream_update.hdr_static_metadata = &hdr_packet;
7296                 }
7297
7298                 status = dc_stream_get_status(dm_new_crtc_state->stream);
7299                 WARN_ON(!status);
7300                 WARN_ON(!status->plane_count);
7301
7302                 /*
7303                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
7304                  * Here we create an empty update on each plane.
7305                  * To fix this, DC should permit updating only stream properties.
7306                  */
7307                 for (j = 0; j < status->plane_count; j++)
7308                         dummy_updates[j].surface = status->plane_states[0];
7309
7310
7311                 mutex_lock(&dm->dc_lock);
7312                 dc_commit_updates_for_stream(dm->dc,
7313                                                      dummy_updates,
7314                                                      status->plane_count,
7315                                                      dm_new_crtc_state->stream,
7316                                                      &stream_update,
7317                                                      dc_state);
7318                 mutex_unlock(&dm->dc_lock);
7319         }
7320
7321         /* Count number of newly disabled CRTCs for dropping PM refs later. */
7322         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7323                                       new_crtc_state, i) {
7324                 if (old_crtc_state->active && !new_crtc_state->active)
7325                         crtc_disable_count++;
7326
7327                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7328                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7329
7330                 /* Update freesync active state. */
7331                 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
7332
7333                 /* Handle vrr on->off / off->on transitions */
7334                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7335                                                 dm_new_crtc_state);
7336         }
7337
7338         /* Enable interrupts for CRTCs going through a modeset. */
7339         amdgpu_dm_enable_crtc_interrupts(dev, state, true);
7340
7341         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7342                 if (new_crtc_state->async_flip)
7343                         wait_for_vblank = false;
7344
7345         /* Update planes when needed, per CRTC */
7346         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7347                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7348
7349                 if (dm_new_crtc_state->stream)
7350                         amdgpu_dm_commit_planes(state, dc_state, dev,
7351                                                 dm, crtc, wait_for_vblank);
7352         }
7353
7354         /* Enable interrupts for CRTCs going from 0 to n active planes. */
7355         amdgpu_dm_enable_crtc_interrupts(dev, state, false);
7356
7357         /* Update audio instances for each connector. */
7358         amdgpu_dm_commit_audio(dev, state);
7359
7360         /*
7361          * Send a vblank event for every event not handled in the flip path,
7362          * and mark the event consumed for drm_atomic_helper_commit_hw_done().
7363          */
7364         spin_lock_irqsave(&adev->ddev->event_lock, flags);
7365         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7366
7367                 if (new_crtc_state->event)
7368                         drm_send_event_locked(dev, &new_crtc_state->event->base);
7369
7370                 new_crtc_state->event = NULL;
7371         }
7372         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
7373
7374         /* Signal HW programming completion */
7375         drm_atomic_helper_commit_hw_done(state);
7376
7377         if (wait_for_vblank)
7378                 drm_atomic_helper_wait_for_flip_done(dev, state);
7379
7380         drm_atomic_helper_cleanup_planes(dev, state);
7381
7382         /*
7383          * Finally, drop a runtime PM reference for each newly disabled CRTC,
7384          * so we can put the GPU into runtime suspend if we're not driving any
7385          * displays anymore
7386          */
7387         for (i = 0; i < crtc_disable_count; i++)
7388                 pm_runtime_put_autosuspend(dev->dev);
7389         pm_runtime_mark_last_busy(dev->dev);
7390
7391         if (dc_state_temp)
7392                 dc_release_state(dc_state_temp);
7393 }
7394
7395
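/*
 * Build a minimal drm_atomic_state touching the connector, its CRTC and the
 * CRTC's primary plane, force mode_changed on the CRTC state, and commit it.
 * Used to restore the previous display configuration when no mode set is
 * expected from usermode (see dm_restore_drm_connector_state() below).
 */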
7396 static int dm_force_atomic_commit(struct drm_connector *connector)
7397 {
7398         int ret = 0;
7399         struct drm_device *ddev = connector->dev;
7400         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7401         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7402         struct drm_plane *plane = disconnected_acrtc->base.primary;
7403         struct drm_connector_state *conn_state;
7404         struct drm_crtc_state *crtc_state;
7405         struct drm_plane_state *plane_state;
7406
7407         if (!state)
7408                 return -ENOMEM;
7409
7410         state->acquire_ctx = ddev->mode_config.acquire_ctx;
7411
7412         /* Construct an atomic state to restore the previous display settings */
7413
7414         /*
7415          * Attach connectors to drm_atomic_state
7416          */
7417         conn_state = drm_atomic_get_connector_state(state, connector);
7418
7419         ret = PTR_ERR_OR_ZERO(conn_state);
7420         if (ret)
7421                 goto err;
7422
7423         /* Attach CRTC to drm_atomic_state */
7424         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7425
7426         ret = PTR_ERR_OR_ZERO(crtc_state);
7427         if (ret)
7428                 goto err;
7429
7430         /* force a restore */
7431         crtc_state->mode_changed = true;
7432
7433         /* Attach plane to drm_atomic_state */
7434         plane_state = drm_atomic_get_plane_state(state, plane);
7435
7436         ret = PTR_ERR_OR_ZERO(plane_state);
7437         if (ret)
7438                 goto err;
7439
7440
7441         /* Call commit internally with the state we just constructed */
7442         ret = drm_atomic_commit(state);
7443         if (!ret)
7444                 return 0;
7445
7446 err:
7447         DRM_ERROR("Restoring old state failed with %i\n", ret);
7448         drm_atomic_state_put(state);
7449
7450         return ret;
7451 }
7452
7453 /*
7454  * This function handles all cases where a mode set does not come in upon
7455  * hotplug. This includes when a display is unplugged and then plugged back
7456  * into the same port, and when running without usermode desktop manager support.
7457  */
7458 void dm_restore_drm_connector_state(struct drm_device *dev,
7459                                     struct drm_connector *connector)
7460 {
7461         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7462         struct amdgpu_crtc *disconnected_acrtc;
7463         struct dm_crtc_state *acrtc_state;
7464
7465         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7466                 return;
7467
7468         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7469         if (!disconnected_acrtc)
7470                 return;
7471
7472         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7473         if (!acrtc_state->stream)
7474                 return;
7475
7476         /*
7477          * If the previous sink has not been released and differs from the
7478          * current one, we deduce that we cannot rely on a usermode call to
7479          * turn the display on, so we do it here.
7480          */
7481         if (acrtc_state->stream->sink != aconnector->dc_sink)
7482                 dm_force_atomic_commit(&aconnector->base);
7483 }
7484
7485 /*
7486  * Grabs all modesetting locks to serialize against any blocking commits,
7487  * and waits for completion of all non-blocking commits.
7488  */
7489 static int do_aquire_global_lock(struct drm_device *dev,
7490                                  struct drm_atomic_state *state)
7491 {
7492         struct drm_crtc *crtc;
7493         struct drm_crtc_commit *commit;
7494         long ret;
7495
7496         /*
7497          * Adding all modeset locks to acquire_ctx ensures that when the
7498          * framework releases it, the extra locks we take here will be
7499          * released too.
7500          */
7501         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7502         if (ret)
7503                 return ret;
7504
7505         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7506                 spin_lock(&crtc->commit_lock);
7507                 commit = list_first_entry_or_null(&crtc->commit_list,
7508                                 struct drm_crtc_commit, commit_entry);
7509                 if (commit)
7510                         drm_crtc_commit_get(commit);
7511                 spin_unlock(&crtc->commit_lock);
7512
7513                 if (!commit)
7514                         continue;
7515
7516                 /*
7517                  * Make sure all pending HW programming has completed and
7518                  * all page flips are done.
7519                  */
7520                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7521
7522                 if (ret > 0)
7523                         ret = wait_for_completion_interruptible_timeout(
7524                                         &commit->flip_done, 10*HZ);
7525
7526                 if (ret == 0)
7527                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7528                                   "timed out\n", crtc->base.id, crtc->name);
7529
7530                 drm_crtc_commit_put(commit);
7531         }
7532
7533         return ret < 0 ? ret : 0;
7534 }
7535
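/*
 * Derive the FreeSync/VRR configuration for a CRTC from its connector state:
 * VRR is supported when the connector reports freesync capability and the
 * mode's nominal refresh rate lies within the monitor's
 * [min_vfreq, max_vfreq] range. The mod_freesync interface takes refresh
 * rates in micro-Hz, hence the multiplication by 1000000 below
 * (e.g. 48 Hz -> 48000000 uHz).
 */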
7536 static void get_freesync_config_for_crtc(
7537         struct dm_crtc_state *new_crtc_state,
7538         struct dm_connector_state *new_con_state)
7539 {
7540         struct mod_freesync_config config = {0};
7541         struct amdgpu_dm_connector *aconnector =
7542                         to_amdgpu_dm_connector(new_con_state->base.connector);
7543         struct drm_display_mode *mode = &new_crtc_state->base.mode;
7544         int vrefresh = drm_mode_vrefresh(mode);
7545
7546         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
7547                                         vrefresh >= aconnector->min_vfreq &&
7548                                         vrefresh <= aconnector->max_vfreq;
7549
7550         if (new_crtc_state->vrr_supported) {
7551                 new_crtc_state->stream->ignore_msa_timing_param = true;
7552                 config.state = new_crtc_state->base.vrr_enabled ?
7553                                 VRR_STATE_ACTIVE_VARIABLE :
7554                                 VRR_STATE_INACTIVE;
7555                 config.min_refresh_in_uhz =
7556                                 aconnector->min_vfreq * 1000000;
7557                 config.max_refresh_in_uhz =
7558                                 aconnector->max_vfreq * 1000000;
7559                 config.vsif_supported = true;
7560                 config.btr = true;
7561         }
7562
7563         new_crtc_state->freesync_config = config;
7564 }
7565
7566 static void reset_freesync_config_for_crtc(
7567         struct dm_crtc_state *new_crtc_state)
7568 {
7569         new_crtc_state->vrr_supported = false;
7570
7571         memset(&new_crtc_state->vrr_params, 0,
7572                sizeof(new_crtc_state->vrr_params));
7573         memset(&new_crtc_state->vrr_infopacket, 0,
7574                sizeof(new_crtc_state->vrr_infopacket));
7575 }
7576
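/*
 * Atomic-check helper for CRTC state: with enable == false it removes the
 * dc_stream from the DC context for disabled/changed CRTCs; with
 * enable == true it creates the new stream and adds it to the context.
 * *lock_and_validation_needed is set whenever the global DC context is
 * modified. Non-modeset stream updates (scaling, ABM, color management,
 * FreeSync config) are applied at the end for active CRTCs.
 */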
7577 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
7578                                 struct drm_atomic_state *state,
7579                                 struct drm_crtc *crtc,
7580                                 struct drm_crtc_state *old_crtc_state,
7581                                 struct drm_crtc_state *new_crtc_state,
7582                                 bool enable,
7583                                 bool *lock_and_validation_needed)
7584 {
7585         struct dm_atomic_state *dm_state = NULL;
7586         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7587         struct dc_stream_state *new_stream;
7588         int ret = 0;
7589
7590         /*
7591          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
7592          * dc_validation_set; update changed items.
7593          */
7594         struct amdgpu_crtc *acrtc = NULL;
7595         struct amdgpu_dm_connector *aconnector = NULL;
7596         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
7597         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
7598
7599         new_stream = NULL;
7600
7601         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7602         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7603         acrtc = to_amdgpu_crtc(crtc);
7604         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
7605
7606         /* TODO This hack should go away */
7607         if (aconnector && enable) {
7608                 /* Make sure fake sink is created in plug-in scenario */
7609                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
7610                                                             &aconnector->base);
7611                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
7612                                                             &aconnector->base);
7613
7614                 if (IS_ERR(drm_new_conn_state)) {
7615                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
7616                         goto fail;
7617                 }
7618
7619                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
7620                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
7621
7622                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7623                         goto skip_modeset;
7624
7625                 new_stream = create_stream_for_sink(aconnector,
7626                                                      &new_crtc_state->mode,
7627                                                     dm_new_conn_state,
7628                                                     dm_old_crtc_state->stream);
7629
7630                 /*
7631                  * We can have no stream on ACTION_SET if a display
7632                  * was disconnected during S3. In this case it is not an
7633                  * error: the OS will be updated after detection and
7634                  * will do the right thing on the next atomic commit.
7635                  */
7636
7637                 if (!new_stream) {
7638                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7639                                         __func__, acrtc->base.base.id);
7640                         ret = -ENOMEM;
7641                         goto fail;
7642                 }
7643
7644                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7645
7646                 ret = fill_hdr_info_packet(drm_new_conn_state,
7647                                            &new_stream->hdr_static_metadata);
7648                 if (ret)
7649                         goto fail;
7650
7651                 /*
7652                  * If we already removed the old stream from the context
7653                  * (and set the new stream to NULL) then we can't reuse
7654                  * the old stream even if the stream and scaling are unchanged.
7655                  * We'd hit the BUG_ON below and get a black screen.
7656                  *
7657                  * TODO: Refactor this function to allow this check to work
7658                  * in all conditions.
7659                  */
7660                 if (dm_new_crtc_state->stream &&
7661                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
7662                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
7663                         new_crtc_state->mode_changed = false;
7664                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
7665                                          new_crtc_state->mode_changed);
7666                 }
7667         }
7668
7669         /* mode_changed flag may get updated above, need to check again */
7670         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7671                 goto skip_modeset;
7672
7673         DRM_DEBUG_DRIVER(
7674                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7675                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
7676                 "connectors_changed:%d\n",
7677                 acrtc->crtc_id,
7678                 new_crtc_state->enable,
7679                 new_crtc_state->active,
7680                 new_crtc_state->planes_changed,
7681                 new_crtc_state->mode_changed,
7682                 new_crtc_state->active_changed,
7683                 new_crtc_state->connectors_changed);
7684
7685         /* Remove stream for any changed/disabled CRTC */
7686         if (!enable) {
7687
7688                 if (!dm_old_crtc_state->stream)
7689                         goto skip_modeset;
7690
7691                 ret = dm_atomic_get_state(state, &dm_state);
7692                 if (ret)
7693                         goto fail;
7694
7695                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
7696                                 crtc->base.id);
7697
7698                 /* i.e. reset mode */
7699                 if (dc_remove_stream_from_ctx(
7700                                 dm->dc,
7701                                 dm_state->context,
7702                                 dm_old_crtc_state->stream) != DC_OK) {
7703                         ret = -EINVAL;
7704                         goto fail;
7705                 }
7706
7707                 dc_stream_release(dm_old_crtc_state->stream);
7708                 dm_new_crtc_state->stream = NULL;
7709
7710                 reset_freesync_config_for_crtc(dm_new_crtc_state);
7711
7712                 *lock_and_validation_needed = true;
7713
7714         } else { /* Add stream for any updated/enabled CRTC */
7715                 /*
7716                  * Quick fix to prevent a NULL pointer dereference on new_stream
7717                  * when MST connectors added in chained mode are not found in the
7718                  * existing crtc_state. TODO: dig out the root cause of this.
7719                  */
7720                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
7721                         goto skip_modeset;
7722
7723                 if (modereset_required(new_crtc_state))
7724                         goto skip_modeset;
7725
7726                 if (modeset_required(new_crtc_state, new_stream,
7727                                      dm_old_crtc_state->stream)) {
7728
7729                         WARN_ON(dm_new_crtc_state->stream);
7730
7731                         ret = dm_atomic_get_state(state, &dm_state);
7732                         if (ret)
7733                                 goto fail;
7734
7735                         dm_new_crtc_state->stream = new_stream;
7736
7737                         dc_stream_retain(new_stream);
7738
7739                         DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
7740                                                 crtc->base.id);
7741
7742                         if (dc_add_stream_to_ctx(
7743                                         dm->dc,
7744                                         dm_state->context,
7745                                         dm_new_crtc_state->stream) != DC_OK) {
7746                                 ret = -EINVAL;
7747                                 goto fail;
7748                         }
7749
7750                         *lock_and_validation_needed = true;
7751                 }
7752         }
7753
7754 skip_modeset:
7755         /* Release extra reference */
7756         if (new_stream)
7757                 dc_stream_release(new_stream);
7758
7759         /*
7760          * We want to do dc stream updates that do not require a
7761          * full modeset below.
7762          */
7763         if (!(enable && aconnector && new_crtc_state->enable &&
7764               new_crtc_state->active))
7765                 return 0;
7766         /*
7767          * Given above conditions, the dc state cannot be NULL because:
7768          * 1. We're in the process of enabling CRTCs (just been added
7769          *    to the dc context, or already is on the context)
7770          * 2. Has a valid connector attached, and
7771          * 3. Is currently active and enabled.
7772          * => The dc stream state currently exists.
7773          */
7774         BUG_ON(dm_new_crtc_state->stream == NULL);
7775
7776         /* Scaling or underscan settings */
7777         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
7778                 update_stream_scaling_settings(
7779                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
7780
7781         /* ABM settings */
7782         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
7783
7784         /*
7785          * Color management settings. We also update color properties
7786          * when a modeset is needed, to ensure it gets reprogrammed.
7787          */
7788         if (dm_new_crtc_state->base.color_mgmt_changed ||
7789             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
7790                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
7791                 if (ret)
7792                         goto fail;
7793         }
7794
7795         /* Update Freesync settings. */
7796         get_freesync_config_for_crtc(dm_new_crtc_state,
7797                                      dm_new_conn_state);
7798
7799         return ret;
7800
7801 fail:
7802         if (new_stream)
7803                 dc_stream_release(new_stream);
7804         return ret;
7805 }
7806
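/*
 * Decide whether a plane update requires the plane to be removed from and
 * re-added to the DC context rather than updated in place. Any change in
 * plane/CRTC attachment, CRTC color management, a pending modeset, or a
 * format or z-order affecting change on another plane of the same CRTC
 * forces a reset.
 */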
7807 static bool should_reset_plane(struct drm_atomic_state *state,
7808                                struct drm_plane *plane,
7809                                struct drm_plane_state *old_plane_state,
7810                                struct drm_plane_state *new_plane_state)
7811 {
7812         struct drm_plane *other;
7813         struct drm_plane_state *old_other_state, *new_other_state;
7814         struct drm_crtc_state *new_crtc_state;
7815         int i;
7816
7817         /*
7818          * TODO: Remove this hack once the checks below are sufficient
7819          * to determine when we need to reset all the planes on
7820          * the stream.
7821          */
7822         if (state->allow_modeset)
7823                 return true;
7824
7825         /* Exit early if we know that we're adding or removing the plane. */
7826         if (old_plane_state->crtc != new_plane_state->crtc)
7827                 return true;
7828
7829         /* old crtc == new_crtc == NULL, plane not in context. */
7830         if (!new_plane_state->crtc)
7831                 return false;
7832
7833         new_crtc_state =
7834                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
7835
7836         if (!new_crtc_state)
7837                 return true;
7838
7839         /* CRTC Degamma changes currently require us to recreate planes. */
7840         if (new_crtc_state->color_mgmt_changed)
7841                 return true;
7842
7843         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
7844                 return true;
7845
7846         /*
7847          * If there are any new primary or overlay planes being added or
7848          * removed then the z-order can potentially change. To ensure
7849          * correct z-order and pipe acquisition the current DC architecture
7850          * requires us to remove and recreate all existing planes.
7851          *
7852          * TODO: Come up with a more elegant solution for this.
7853          */
7854         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
7855                 if (other->type == DRM_PLANE_TYPE_CURSOR)
7856                         continue;
7857
7858                 if (old_other_state->crtc != new_plane_state->crtc &&
7859                     new_other_state->crtc != new_plane_state->crtc)
7860                         continue;
7861
7862                 if (old_other_state->crtc != new_other_state->crtc)
7863                         return true;
7864
7865                 /* TODO: Remove this once we can handle fast format changes. */
7866                 if (old_other_state->fb && new_other_state->fb &&
7867                     old_other_state->fb->format != new_other_state->fb->format)
7868                         return true;
7869         }
7870
7871         return false;
7872 }
7873
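/*
 * Plane counterpart of dm_update_crtc_state(): validates cursor planes
 * directly, and for other plane types either removes the old dc_plane_state
 * from the DC context (enable == false) or creates, fills and adds a new
 * one (enable == true), gated by should_reset_plane().
 */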
7874 static int dm_update_plane_state(struct dc *dc,
7875                                  struct drm_atomic_state *state,
7876                                  struct drm_plane *plane,
7877                                  struct drm_plane_state *old_plane_state,
7878                                  struct drm_plane_state *new_plane_state,
7879                                  bool enable,
7880                                  bool *lock_and_validation_needed)
7881 {
7882
7883         struct dm_atomic_state *dm_state = NULL;
7884         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
7885         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7886         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
7887         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
7888         struct amdgpu_crtc *new_acrtc;
7889         bool needs_reset;
7890         int ret = 0;
7891
7892
7893         new_plane_crtc = new_plane_state->crtc;
7894         old_plane_crtc = old_plane_state->crtc;
7895         dm_new_plane_state = to_dm_plane_state(new_plane_state);
7896         dm_old_plane_state = to_dm_plane_state(old_plane_state);
7897
7898         /* TODO: Implement a better atomic check for the cursor plane */
7899         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
7900                 if (!enable || !new_plane_crtc ||
7901                         drm_atomic_plane_disabling(plane->state, new_plane_state))
7902                         return 0;
7903
7904                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
7905
7906                 if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
7907                         (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
7908                         DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
7909                                                          new_plane_state->crtc_w, new_plane_state->crtc_h);
7910                         return -EINVAL;
7911                 }
7912
7913                 if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
7914                         new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
7915                         DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
7916                                                          new_plane_state->crtc_x, new_plane_state->crtc_y);
7917                         return -EINVAL;
7918                 }
7919
7920                 return 0;
7921         }
7922
7923         needs_reset = should_reset_plane(state, plane, old_plane_state,
7924                                          new_plane_state);
7925
7926         /* Remove any changed/removed planes */
7927         if (!enable) {
7928                 if (!needs_reset)
7929                         return 0;
7930
7931                 if (!old_plane_crtc)
7932                         return 0;
7933
7934                 old_crtc_state = drm_atomic_get_old_crtc_state(
7935                                 state, old_plane_crtc);
7936                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7937
7938                 if (!dm_old_crtc_state->stream)
7939                         return 0;
7940
7941                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
7942                                 plane->base.id, old_plane_crtc->base.id);
7943
7944                 ret = dm_atomic_get_state(state, &dm_state);
7945                 if (ret)
7946                         return ret;
7947
7948                 if (!dc_remove_plane_from_context(
7949                                 dc,
7950                                 dm_old_crtc_state->stream,
7951                                 dm_old_plane_state->dc_state,
7952                                 dm_state->context)) {
7953
7954                         ret = -EINVAL;
7955                         return ret;
7956                 }
7957
7958
7959                 dc_plane_state_release(dm_old_plane_state->dc_state);
7960                 dm_new_plane_state->dc_state = NULL;
7961
7962                 *lock_and_validation_needed = true;
7963
7964         } else { /* Add new planes */
7965                 struct dc_plane_state *dc_new_plane_state;
7966
7967                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
7968                         return 0;
7969
7970                 if (!new_plane_crtc)
7971                         return 0;
7972
7973                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
7974                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7975
7976                 if (!dm_new_crtc_state->stream)
7977                         return 0;
7978
7979                 if (!needs_reset)
7980                         return 0;
7981
7982                 WARN_ON(dm_new_plane_state->dc_state);
7983
7984                 dc_new_plane_state = dc_create_plane_state(dc);
7985                 if (!dc_new_plane_state)
7986                         return -ENOMEM;
7987
7988                 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
7989                                 plane->base.id, new_plane_crtc->base.id);
7990
7991                 ret = fill_dc_plane_attributes(
7992                         new_plane_crtc->dev->dev_private,
7993                         dc_new_plane_state,
7994                         new_plane_state,
7995                         new_crtc_state);
7996                 if (ret) {
7997                         dc_plane_state_release(dc_new_plane_state);
7998                         return ret;
7999                 }
8000
8001                 ret = dm_atomic_get_state(state, &dm_state);
8002                 if (ret) {
8003                         dc_plane_state_release(dc_new_plane_state);
8004                         return ret;
8005                 }
8006
8007                 /*
8008                  * Any atomic check errors that occur after this will
8009                  * not need a release. The plane state will be attached
8010                  * to the stream, and therefore part of the atomic
8011                  * state. It'll be released when the atomic state is
8012                  * cleaned.
8013                  */
8014                 if (!dc_add_plane_to_context(
8015                                 dc,
8016                                 dm_new_crtc_state->stream,
8017                                 dc_new_plane_state,
8018                                 dm_state->context)) {
8019
8020                         dc_plane_state_release(dc_new_plane_state);
8021                         return -EINVAL;
8022                 }
8023
8024                 dm_new_plane_state->dc_state = dc_new_plane_state;
8025
8026                 /* Tell DC to do a full surface update every time there
8027                  * is a plane change. Inefficient, but works for now.
8028                  */
8029                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8030
8031                 *lock_and_validation_needed = true;
8032         }
8033
8034
8035         return ret;
8036 }
8037
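/*
 * Classify the pending commit as a FAST or FULL update for DC. Builds
 * per-CRTC bundles of surface/stream updates mirroring what commit tail
 * would send, and conservatively falls back to UPDATE_TYPE_FULL whenever a
 * stream or plane state object was replaced or the bundle allocation fails.
 */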
8038 static int
8039 dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
8040                                     struct drm_atomic_state *state,
8041                                     enum surface_update_type *out_type)
8042 {
8043         struct dc *dc = dm->dc;
8044         struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
8045         int i, j, num_plane, ret = 0;
8046         struct drm_plane_state *old_plane_state, *new_plane_state;
8047         struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
8048         struct drm_crtc *new_plane_crtc;
8049         struct drm_plane *plane;
8050
8051         struct drm_crtc *crtc;
8052         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
8053         struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
8054         struct dc_stream_status *status = NULL;
8055         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8056         struct surface_info_bundle {
8057                 struct dc_surface_update surface_updates[MAX_SURFACES];
8058                 struct dc_plane_info plane_infos[MAX_SURFACES];
8059                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8060                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8061                 struct dc_stream_update stream_update;
8062         } *bundle;
8063
8064         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8065
8066         if (!bundle) {
8067                 DRM_ERROR("Failed to allocate update bundle\n");
8068                 /* Set type to FULL to avoid crashing in DC */
8069                 update_type = UPDATE_TYPE_FULL;
8070                 goto cleanup;
8071         }
8072
8073         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8074
8075                 memset(bundle, 0, sizeof(struct surface_info_bundle));
8076
8077                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8078                 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
8079                 num_plane = 0;
8080
8081                 if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
8082                         update_type = UPDATE_TYPE_FULL;
8083                         goto cleanup;
8084                 }
8085
8086                 if (!new_dm_crtc_state->stream)
8087                         continue;
8088
8089                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
8090                         const struct amdgpu_framebuffer *amdgpu_fb =
8091                                 to_amdgpu_framebuffer(new_plane_state->fb);
8092                         struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane];
8093                         struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
8094                         struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
8095                         uint64_t tiling_flags;
8096                         bool tmz_surface = false;
8097
8098                         new_plane_crtc = new_plane_state->crtc;
8099                         new_dm_plane_state = to_dm_plane_state(new_plane_state);
8100                         old_dm_plane_state = to_dm_plane_state(old_plane_state);
8101
8102                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8103                                 continue;
8104
8105                         if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
8106                                 update_type = UPDATE_TYPE_FULL;
8107                                 goto cleanup;
8108                         }
8109
8110                         if (crtc != new_plane_crtc)
8111                                 continue;
8112
8113                         bundle->surface_updates[num_plane].surface =
8114                                         new_dm_plane_state->dc_state;
8115
8116                         if (new_crtc_state->mode_changed) {
8117                                 bundle->stream_update.dst = new_dm_crtc_state->stream->dst;
8118                                 bundle->stream_update.src = new_dm_crtc_state->stream->src;
8119                         }
8120
8121                         if (new_crtc_state->color_mgmt_changed) {
8122                                 bundle->surface_updates[num_plane].gamma =
8123                                                 new_dm_plane_state->dc_state->gamma_correction;
8124                                 bundle->surface_updates[num_plane].in_transfer_func =
8125                                                 new_dm_plane_state->dc_state->in_transfer_func;
8126                                 bundle->surface_updates[num_plane].gamut_remap_matrix =
8127                                                 &new_dm_plane_state->dc_state->gamut_remap_matrix;
8128                                 bundle->stream_update.gamut_remap =
8129                                                 &new_dm_crtc_state->stream->gamut_remap_matrix;
8130                                 bundle->stream_update.output_csc_transform =
8131                                                 &new_dm_crtc_state->stream->csc_color_matrix;
8132                                 bundle->stream_update.out_transfer_func =
8133                                                 new_dm_crtc_state->stream->out_transfer_func;
8134                         }
8135
8136                         ret = fill_dc_scaling_info(new_plane_state,
8137                                                    scaling_info);
8138                         if (ret)
8139                                 goto cleanup;
8140
8141                         bundle->surface_updates[num_plane].scaling_info = scaling_info;
8142
8143                         if (amdgpu_fb) {
8144                                 ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
8145                                 if (ret)
8146                                         goto cleanup;
8147
8148                                 ret = fill_dc_plane_info_and_addr(
8149                                         dm->adev, new_plane_state, tiling_flags,
8150                                         plane_info,
8151                                         &flip_addr->address, tmz_surface,
8152                                         false);
8153                                 if (ret)
8154                                         goto cleanup;
8155
8156                                 bundle->surface_updates[num_plane].plane_info = plane_info;
8157                                 bundle->surface_updates[num_plane].flip_addr = flip_addr;
8158                         }
8159
8160                         num_plane++;
8161                 }
8162
8163                 if (num_plane == 0)
8164                         continue;
8165
8166                 ret = dm_atomic_get_state(state, &dm_state);
8167                 if (ret)
8168                         goto cleanup;
8169
8170                 old_dm_state = dm_atomic_get_old_state(state);
8171                 if (!old_dm_state) {
8172                         ret = -EINVAL;
8173                         goto cleanup;
8174                 }
8175
8176                 status = dc_stream_get_status_from_state(old_dm_state->context,
8177                                                          new_dm_crtc_state->stream);
8178                 bundle->stream_update.stream = new_dm_crtc_state->stream;
8179                 /*
8180                  * TODO: DC modifies the surface during this call so we need
8181                  * to lock here - find a way to do this without locking.
8182                  */
8183                 mutex_lock(&dm->dc_lock);
8184                 update_type = dc_check_update_surfaces_for_stream(
8185                                 dc, bundle->surface_updates, num_plane,
8186                                 &bundle->stream_update, status);
8187                 mutex_unlock(&dm->dc_lock);
8188
8189                 if (update_type > UPDATE_TYPE_MED) {
8190                         update_type = UPDATE_TYPE_FULL;
8191                         goto cleanup;
8192                 }
8193         }
8194
8195 cleanup:
8196         kfree(bundle);
8197
8198         *out_type = update_type;
8199         return ret;
8200 }
8201
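/*
 * Find the connector driving @crtc in @state and, if it is an MST port, add
 * every CRTC sharing its DSC-capable topology to the atomic state, since a
 * modeset on one stream can redistribute DSC bandwidth across the topology.
 */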
8202 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8203 {
8204         struct drm_connector *connector;
8205         struct drm_connector_state *conn_state;
8206         struct amdgpu_dm_connector *aconnector = NULL;
8207         int i;
8208         for_each_new_connector_in_state(state, connector, conn_state, i) {
8209                 if (conn_state->crtc != crtc)
8210                         continue;
8211
8212                 aconnector = to_amdgpu_dm_connector(connector);
8213                 if (!aconnector->port || !aconnector->mst_port)
8214                         aconnector = NULL;
8215                 else
8216                         break;
8217         }
8218
8219         if (!aconnector)
8220                 return 0;
8221
8222         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8223 }
8224
8225 /**
8226  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
8227  * @dev: The DRM device
8228  * @state: The atomic state to commit
8229  *
8230  * Validate that the given atomic state is programmable by DC into hardware.
8231  * This involves constructing a &struct dc_state reflecting the new hardware
8232  * state we wish to commit, then querying DC to see if it is programmable. It's
8233  * important not to modify the existing DC state. Otherwise, atomic_check
8234  * may unexpectedly commit hardware changes.
8235  *
8236  * When validating the DC state, it's important that the right locks are
8237  * acquired. For the full-update case, which removes/adds/updates streams on
8238  * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
8239  * that any such full-update commit will wait for completion of any outstanding
8240  * flip using DRM's synchronization events. See
8241  * dm_determine_update_type_for_commit().
8242  *
8243  * Note that DM adds the affected connectors for all CRTCs in state, even when that
8244  * might not seem necessary. This is because DC stream creation requires the
8245  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8246  * be possible but non-trivial - a possible TODO item.
8247  *
8248  * Return: 0 on success, negative error code on failure.
8249  */
8250 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8251                                   struct drm_atomic_state *state)
8252 {
8253         struct amdgpu_device *adev = dev->dev_private;
8254         struct dm_atomic_state *dm_state = NULL;
8255         struct dc *dc = adev->dm.dc;
8256         struct drm_connector *connector;
8257         struct drm_connector_state *old_con_state, *new_con_state;
8258         struct drm_crtc *crtc;
8259         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8260         struct drm_plane *plane;
8261         struct drm_plane_state *old_plane_state, *new_plane_state;
8262         enum surface_update_type update_type = UPDATE_TYPE_FAST;
8263         enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
8264
8265         int ret, i;
8266
8267         /*
8268          * This bool will be set to true for any modeset/reset
8269          * or plane update that implies a non-fast surface update.
8270          */
8271         bool lock_and_validation_needed = false;
8272
8273         ret = drm_atomic_helper_check_modeset(dev, state);
8274         if (ret)
8275                 goto fail;
8276
8277         if (adev->asic_type >= CHIP_NAVI10) {
8278                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8279                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8280                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
8281                                 if (ret)
8282                                         goto fail;
8283                         }
8284                 }
8285         }
8286
8287         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8288                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8289                     !new_crtc_state->color_mgmt_changed &&
8290                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8291                         continue;
8292
8293                 if (!new_crtc_state->enable)
8294                         continue;
8295
8296                 ret = drm_atomic_add_affected_connectors(state, crtc);
8297                 if (ret)
8298                         return ret;
8299
8300                 ret = drm_atomic_add_affected_planes(state, crtc);
8301                 if (ret)
8302                         goto fail;
8303         }
8304
8305         /*
8306          * Add all primary and overlay planes on the CRTC to the state
8307          * whenever a plane is enabled to maintain correct z-ordering
8308          * and to enable fast surface updates.
8309          */
8310         drm_for_each_crtc(crtc, dev) {
8311                 bool modified = false;
8312
8313                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8314                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8315                                 continue;
8316
8317                         if (new_plane_state->crtc == crtc ||
8318                             old_plane_state->crtc == crtc) {
8319                                 modified = true;
8320                                 break;
8321                         }
8322                 }
8323
8324                 if (!modified)
8325                         continue;
8326
8327                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8328                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
8329                                 continue;
8330
8331                         new_plane_state =
8332                                 drm_atomic_get_plane_state(state, plane);
8333
8334                         if (IS_ERR(new_plane_state)) {
8335                                 ret = PTR_ERR(new_plane_state);
8336                                 goto fail;
8337                         }
8338                 }
8339         }
8340
8341         /* Remove existing planes if they are modified */
8342         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8343                 ret = dm_update_plane_state(dc, state, plane,
8344                                             old_plane_state,
8345                                             new_plane_state,
8346                                             false,
8347                                             &lock_and_validation_needed);
8348                 if (ret)
8349                         goto fail;
8350         }
8351
8352         /* Disable all crtcs which require disable */
8353         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8354                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8355                                            old_crtc_state,
8356                                            new_crtc_state,
8357                                            false,
8358                                            &lock_and_validation_needed);
8359                 if (ret)
8360                         goto fail;
8361         }
8362
8363         /* Enable all crtcs which require enable */
8364         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8365                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
8366                                            old_crtc_state,
8367                                            new_crtc_state,
8368                                            true,
8369                                            &lock_and_validation_needed);
8370                 if (ret)
8371                         goto fail;
8372         }
8373
8374         /* Add new/modified planes */
8375         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8376                 ret = dm_update_plane_state(dc, state, plane,
8377                                             old_plane_state,
8378                                             new_plane_state,
8379                                             true,
8380                                             &lock_and_validation_needed);
8381                 if (ret)
8382                         goto fail;
8383         }
8384
8385         /* Run this here since we want to validate the streams we created */
8386         ret = drm_atomic_helper_check_planes(dev, state);
8387         if (ret)
8388                 goto fail;
8389
8390         if (state->legacy_cursor_update) {
8391                 /*
8392                  * This is a fast cursor update coming from the plane update
8393                  * helper, check if it can be done asynchronously for better
8394                  * performance.
8395                  */
8396                 state->async_update =
8397                         !drm_atomic_helper_async_check(dev, state);
8398
8399                 /*
8400                  * Skip the remaining global validation if this is an async
8401                  * update. Cursor updates can be done without affecting
8402                  * state or bandwidth calcs and this avoids the performance
8403                  * penalty of locking the private state object and
8404                  * allocating a new dc_state.
8405                  */
8406                 if (state->async_update)
8407                         return 0;
8408         }
8409
8410         /* Check scaling and underscan changes */
8411         /* TODO: Scaling-change validation was removed due to the inability to
8412          * commit a new stream into the context w/o causing a full reset. Need
8413          * to decide how to handle this.
8414          */
8415         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8416                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8417                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8418                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8419
8420                 /* Skip any modesets/resets */
8421                 if (!acrtc || drm_atomic_crtc_needs_modeset(
8422                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8423                         continue;
8424
8425                 /* Skip anything that is not a scaling or underscan change */
8426                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8427                         continue;
8428
8429                 overall_update_type = UPDATE_TYPE_FULL;
8430                 lock_and_validation_needed = true;
8431         }
8432
8433         ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
8434         if (ret)
8435                 goto fail;
8436
8437         if (overall_update_type < update_type)
8438                 overall_update_type = update_type;
8439
8440         /*
8441          * lock_and_validation_needed was the old way of deciding whether the
8442          * global lock must be taken. It is kept to catch corner cases we broke:
8443          * lock_and_validation_needed == true  => UPDATE_TYPE_FULL or UPDATE_TYPE_MED
8444          * lock_and_validation_needed == false => UPDATE_TYPE_FAST
8445          */
8446         if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
8447                 WARN(1, "Global lock should be set; overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
8448
8449         if (overall_update_type > UPDATE_TYPE_FAST) {
8450                 ret = dm_atomic_get_state(state, &dm_state);
8451                 if (ret)
8452                         goto fail;
8453
8454                 ret = do_aquire_global_lock(dev, state);
8455                 if (ret)
8456                         goto fail;
8457
8458 #if defined(CONFIG_DRM_AMD_DC_DCN)
8459                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8460                         goto fail;
8461
8462                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8463                 if (ret)
8464                         goto fail;
8465 #endif
8466
8467                 /*
8468                  * Perform validation of MST topology in the state:
8469                  * We need to perform MST atomic check before calling
8470                  * dc_validate_global_state(), or there is a chance
8471                  * to get stuck in an infinite loop and hang eventually.
8472                  */
8473                 ret = drm_dp_mst_atomic_check(state);
8474                 if (ret)
8475                         goto fail;
8476
8477                 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
8478                         ret = -EINVAL;
8479                         goto fail;
8480                 }
8481         } else {
8482                 /*
8483                  * The commit is a fast update. Fast updates shouldn't change
8484                  * the DC context, affect global validation, and can have their
8485                  * commit work done in parallel with other commits not touching
8486                  * the same resource. If we have a new DC context as part of
8487                  * the DM atomic state from validation we need to free it and
8488                  * retain the existing one instead.
8489                  */
8490                 struct dm_atomic_state *new_dm_state, *old_dm_state;
8491
8492                 new_dm_state = dm_atomic_get_new_state(state);
8493                 old_dm_state = dm_atomic_get_old_state(state);
8494
8495                 if (new_dm_state && old_dm_state) {
8496                         if (new_dm_state->context)
8497                                 dc_release_state(new_dm_state->context);
8498
8499                         new_dm_state->context = old_dm_state->context;
8500
8501                         if (old_dm_state->context)
8502                                 dc_retain_state(old_dm_state->context);
8503                 }
8504         }
8505
8506         /* Store the overall update type for use later in atomic check. */
8507         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8508                 struct dm_crtc_state *dm_new_crtc_state =
8509                         to_dm_crtc_state(new_crtc_state);
8510
8511                 dm_new_crtc_state->update_type = (int)overall_update_type;
8512         }
8513
8514         /* Must be success: ret should be 0 at this point */
8515         WARN_ON(ret);
8516         return ret;
8517
8518 fail:
8519         if (ret == -EDEADLK)
8520                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8521         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8522                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8523         else
8524                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8525
8526         return ret;
8527 }
8528
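/*
 * Variable refresh requires a sink that can ignore the MSA timing parameters,
 * advertised via the DP_MSA_TIMING_PAR_IGNORED bit (bit 6) of the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register. For example, a read of 0x41
 * (bit 6 set) makes the sink eligible for FreeSync here, while 0x01 does not.
 */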
8529 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8530                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
8531 {
8532         uint8_t dpcd_data;
8533         bool capable = false;
8534
8535         if (amdgpu_dm_connector->dc_link &&
8536                 dm_helpers_dp_read_dpcd(
8537                                 NULL,
8538                                 amdgpu_dm_connector->dc_link,
8539                                 DP_DOWN_STREAM_PORT_COUNT,
8540                                 &dpcd_data,
8541                                 sizeof(dpcd_data))) {
8542                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8543         }
8544
8545         return capable;
8546 }
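
/**
 * amdgpu_dm_update_freesync_caps - update a connector's FreeSync/VRR state
 * @connector: DRM connector to update
 * @edid: EDID to parse for a continuous-frequency range, or NULL when none
 *        is available
 *
 * Parses the EDID detailed timing descriptors for a monitor range block and,
 * when the advertised vertical refresh range spans more than 10 Hz on a DP/eDP
 * sink that can ignore MSA timing, marks the connector as VRR capable.
 */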
8547 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8548                                         struct edid *edid)
8549 {
8550         int i;
8551         bool edid_check_required;
8552         struct detailed_timing *timing;
8553         struct detailed_non_pixel *data;
8554         struct detailed_data_monitor_range *range;
8555         struct amdgpu_dm_connector *amdgpu_dm_connector =
8556                         to_amdgpu_dm_connector(connector);
8557         struct dm_connector_state *dm_con_state = NULL;
8558
8559         struct drm_device *dev = connector->dev;
8560         struct amdgpu_device *adev = dev->dev_private;
8561         bool freesync_capable = false;
8562
8563         if (!connector->state) {
8564                 DRM_ERROR("%s - Connector has no state\n", __func__);
8565                 goto update;
8566         }
8567
8568         if (!edid) {
8569                 dm_con_state = to_dm_connector_state(connector->state);
8570
8571                 amdgpu_dm_connector->min_vfreq = 0;
8572                 amdgpu_dm_connector->max_vfreq = 0;
8573                 amdgpu_dm_connector->pixel_clock_mhz = 0;
8574
8575                 goto update;
8576         }
8577
8578         dm_con_state = to_dm_connector_state(connector->state);
8579
8580         edid_check_required = false;
8581         if (!amdgpu_dm_connector->dc_sink) {
8582                 DRM_ERROR("dc_sink is NULL, could not set up the FreeSync module.\n");
8583                 goto update;
8584         }
8585         if (!adev->dm.freesync_module)
8586                 goto update;
8587         /*
8588          * If the EDID is non-NULL, restrict FreeSync support to DP and eDP sinks.
8589          */
8590         if (edid) {
8591                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8592                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8593                         edid_check_required = is_dp_capable_without_timing_msa(
8594                                                 adev->dm.dc,
8595                                                 amdgpu_dm_connector);
8596                 }
8597         }
8598         if (edid_check_required && (edid->version > 1 ||
8599            (edid->version == 1 && edid->revision > 1))) {
8600                 for (i = 0; i < 4; i++) {
8601
8602                         timing  = &edid->detailed_timings[i];
8603                         data    = &timing->data.other_data;
8604                         range   = &data->data.range;
8605                         /*
8606                          * Check whether the monitor advertises a continuous frequency mode
8607                          */
8608                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
8609                                 continue;
8610                         /*
8611                          * Check for the "range limits only" flag. If flags == 1,
8612                          * no additional timing information is provided.
8613                          * Default GTF, GTF secondary curve and CVT are not
8614                          * supported.
8615                          */
8616                         if (range->flags != 1)
8617                                 continue;
8618
8619                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8620                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
8621                         amdgpu_dm_connector->pixel_clock_mhz =
8622                                 range->pixel_clock_mhz * 10;
8623                         break;
8624                 }
8625
8626                 if (amdgpu_dm_connector->max_vfreq -
8627                     amdgpu_dm_connector->min_vfreq > 10) {
8628
8629                         freesync_capable = true;
8630                 }
8631         }
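	/*
	 * Worked example (hypothetical panel): a range descriptor advertising
	 * 40-75 Hz with a raw pixel clock byte of 17 yields min_vfreq = 40,
	 * max_vfreq = 75 and pixel_clock_mhz = 170; since 75 - 40 > 10, the
	 * connector is reported as FreeSync capable.
	 */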
8632
8633 update:
8634         if (dm_con_state)
8635                 dm_con_state->freesync_capable = freesync_capable;
8636
8637         if (connector->vrr_capable_property)
8638                 drm_connector_set_vrr_capable_property(connector,
8639                                                        freesync_capable);
8640 }
8641
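/*
 * Cache the sink's PSR capability from the DP_PSR_SUPPORT DPCD register. The
 * byte holds the highest PSR version the sink supports: 0 means no PSR, while
 * any non-zero value (e.g. 1 for PSR1) takes the enable path below and leaves
 * psr_feature_enabled set.
 */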
8642 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
8643 {
8644         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
8645
8646         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
8647                 return;
8648         if (link->type == dc_connection_none)
8649                 return;
8650         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
8651                                         dpcd_data, sizeof(dpcd_data))) {
8652                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
8653
8654                 if (dpcd_data[0] == 0) {
8655                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
8656                         link->psr_settings.psr_feature_enabled = false;
8657                 } else {
8658                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
8659                         link->psr_settings.psr_feature_enabled = true;
8660                 }
8661
8662                 DRM_INFO("PSR support: %d\n", link->psr_settings.psr_feature_enabled);
8663         }
8664 }
8665
8666 /*
8667  * amdgpu_dm_link_setup_psr() - configure the PSR link
8668  * @stream: stream state
8669  *
8670  * Return: true on success
8671  */
8672 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
8673 {
8674         struct dc_link *link = NULL;
8675         struct psr_config psr_config = {0};
8676         struct psr_context psr_context = {0};
8677         bool ret = false;
8678
8679         if (stream == NULL)
8680                 return false;
8681
8682         link = stream->link;
8683
8684         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
8685
8686         if (psr_config.psr_version > 0) {
8687                 psr_config.psr_exit_link_training_required = 0x1;
8688                 psr_config.psr_frame_capture_indication_req = 0;
8689                 psr_config.psr_rfb_setup_time = 0x37;
8690                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
8691                 psr_config.allow_smu_optimizations = 0x0;
8692
8693                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
8694
8695         }
8696         DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
8697
8698         return ret;
8699 }
8700
8701 /*
8702  * amdgpu_dm_psr_enable() - enable PSR f/w
8703  * @stream: stream state
8704  *
8705  * Return: true on success
8706  */
8707 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
8708 {
8709         struct dc_link *link = stream->link;
8710         unsigned int vsync_rate_hz = 0;
8711         struct dc_static_screen_params params = {0};
8712         /* Calculate the number of static frames before generating an interrupt
8713          * to enter PSR.
8714          */
8715         /* Initialize with a fail-safe default of 2 static frames */
8716         unsigned int num_frames_static = 2;
8717
8718         DRM_DEBUG_DRIVER("Enabling psr...\n");
8719
8720         vsync_rate_hz = div64_u64(div64_u64((
8721                         stream->timing.pix_clk_100hz * 100),
8722                         stream->timing.v_total),
8723                         stream->timing.h_total);
8724
8725         /* Round up
8726          * Calculate the number of frames such that at least 30 ms have
8727          * passed.
8728          */
8729         if (vsync_rate_hz != 0) {
8730                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
8731                 num_frames_static = (30000 / frame_time_microsec) + 1;
8732         }
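	/*
	 * Worked example: a 1080p panel with a 148.5 MHz pixel clock and
	 * 2200x1125 total timing refreshes at 148500000 / 1125 / 2200 = 60 Hz,
	 * so frame_time_microsec = 16666 and num_frames_static becomes
	 * 30000 / 16666 + 1 = 2 frames.
	 */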
8733
8734         params.triggers.cursor_update = true;
8735         params.triggers.overlay_update = true;
8736         params.triggers.surface_update = true;
8737         params.num_frames = num_frames_static;
8738
8739         dc_stream_set_static_screen_params(link->ctx->dc,
8740                                            &stream, 1,
8741                                            &params);
8742
8743         return dc_link_set_psr_allow_active(link, true, false);
8744 }
8745
8746 /*
8747  * amdgpu_dm_psr_disable() - disable PSR f/w
8748  * @stream: stream state
8749  *
8750  * Return: true on success
8751  */
8752 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
8753 {
8754
8755         DRM_DEBUG_DRIVER("Disabling psr...\n");
8756
8757         return dc_link_set_psr_allow_active(stream->link, false, true);
8758 }