/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_eld.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
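
/*
 * Informal sketch of the call flow, for orientation only: a DRM atomic
 * commit enters through amdgpu_dm_atomic_check() and
 * amdgpu_dm_atomic_commit_tail(), which translate DRM state into DC stream
 * and plane updates and pass them to DC via helpers such as
 * update_planes_and_stream_adapter() below.
 */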

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    u32 link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        struct amdgpu_crtc *acrtc = NULL;

        if (crtc >= adev->mode_info.num_crtc)
                return 0;

        acrtc = adev->mode_info.crtcs[crtc];

        if (!acrtc->dm_irq_params.stream) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                          crtc);
                return 0;
        }

        return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        u32 v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc = NULL;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        acrtc = adev->mode_info.crtcs[crtc];

        if (!acrtc->dm_irq_params.stream) {
                DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                          crtc);
                return 0;
        }

        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
         */
        dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                 &v_blank_start,
                                 &v_blank_end,
                                 &h_position,
                                 &v_position);

        *position = v_position | (h_position << 16);
        *vbl = v_blank_start | (v_blank_end << 16);

        return 0;
}
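
/*
 * Note on the packing above (mirrors the legacy register layout mentioned
 * in the TODO): *position carries v_position in bits 15:0 and h_position
 * in bits 31:16, while *vbl carries v_blank_start in bits 15:0 and
 * v_blank_end in bits 31:16.
 */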

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
                return true;
        else
                return false;
}

static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
                                        int planes_count)
{
        int i, j;

        for (i = 0, j = planes_count - 1; i < j; i++, j--)
                swap(array_of_surface_update[i], array_of_surface_update[j]);
}
367 /**
368  * update_planes_and_stream_adapter() - Send planes to be updated in DC
369  *
370  * DC has a generic way to update planes and stream via
371  * dc_update_planes_and_stream function; however, DM might need some
372  * adjustments and preparation before calling it. This function is a wrapper
373  * for the dc_update_planes_and_stream that does any required configuration
374  * before passing control to DC.
375  *
376  * @dc: Display Core control structure
377  * @update_type: specify whether it is FULL/MEDIUM/FAST update
378  * @planes_count: planes count to update
379  * @stream: stream state
380  * @stream_update: stream update
381  * @array_of_surface_update: dc surface update pointer
382  *
383  */
384 static inline bool update_planes_and_stream_adapter(struct dc *dc,
385                                                     int update_type,
386                                                     int planes_count,
387                                                     struct dc_stream_state *stream,
388                                                     struct dc_stream_update *stream_update,
389                                                     struct dc_surface_update *array_of_surface_update)
390 {
391         reverse_planes_order(array_of_surface_update, planes_count);
392
393         /*
394          * Previous frame finished and HW is ready for optimization.
395          */
396         if (update_type == UPDATE_TYPE_FAST)
397                 dc_post_update_surfaces_to_stream(dc);
398
399         return dc_update_planes_and_stream(dc,
400                                            array_of_surface_update,
401                                            planes_count,
402                                            stream,
403                                            stream_update);
404 }
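
/*
 * Hypothetical call site for illustration only (variable names are
 * placeholders; real callers live in the atomic commit path of this file):
 *
 *     update_planes_and_stream_adapter(dm->dc, UPDATE_TYPE_FAST,
 *                                      planes_count, dc_stream,
 *                                      &stream_update, surface_updates);
 */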

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct drm_device *dev = adev_to_drm(adev);
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        u32 vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                drm_dbg_state(dev, "CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                drm_dbg_state(dev,
                              "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                              amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
                              amdgpu_crtc->crtc_id, amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one is incremented at the start of this
         * vblank of pageflip completion, so last_flip_vblank is the forbidden
         * count for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        drm_dbg_state(dev,
                      "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                      amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
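
/*
 * In short, dm_pflip_high_irq() completes a flip in one of two ways:
 * outside the VRR front-porch the event is sent immediately with an
 * accurate vblank count, while inside the VRR front-porch it is queued on
 * vblank_event_list so drm_crtc_handle_vblank() can send it once a valid
 * timestamp exists.
 */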

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                drm_dbg_vbl(drm_dev,
                            "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
                            vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        amdgpu_dm_crtc_handle_vblank(acrtc);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct drm_writeback_job *job;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        if (acrtc->wb_pending) {
                if (acrtc->wb_conn) {
                        spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
                        job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
                                                       struct drm_writeback_job,
                                                       list_entry);
                        spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);

                        if (job) {
                                unsigned int v_total, refresh_hz;
                                struct dc_stream_state *stream = acrtc->dm_irq_params.stream;

                                v_total = stream->adjust.v_total_max ?
                                          stream->adjust.v_total_max : stream->timing.v_total;
                                refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
                                             100LL, (v_total * stream->timing.h_total));
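                                /*
                                 * Worked example, assuming a standard 1080p60
                                 * timing: pix_clk_100hz = 1485000 (148.5 MHz),
                                 * h_total = 2200, v_total = 1125, so
                                 * refresh_hz = 148500000 / (1125 * 2200) = 60.
                                 */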
                                mdelay(1000 / refresh_hz);

                                drm_writeback_signal_completion(acrtc->wb_conn, 0);
                                dc_stream_fc_disable_writeback(adev->dm.dc,
                                                               acrtc->dm_irq_params.stream, 0);
                        }
                } else
                        DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
                acrtc->wb_pending = false;
        }

        vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

        drm_dbg_vbl(adev_to_drm(adev),
                    "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                    vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                amdgpu_dm_crtc_handle_vblank(acrtc);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        u8 link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        if (notify->link_index > adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {

                if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        if (notify->type == DMUB_NOTIFICATION_HPD)
                                DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                                DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
                        else
                                DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
                                                notify->type, link_index);

                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if successfully registered, false if the callback is NULL
 * or the notification type is out of range.
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}
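
/*
 * Illustrative registration, assuming the wiring done at init time looks
 * like this (offloading HPD processing to the dmub worker thread):
 *
 *     register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                   dmub_hpd_callback, true);
 */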

static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching pending DMUB notifications
 * and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
                irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
                                                                    GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                        entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}
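
/*
 * Note: the handler above drains two independent sources in one pass:
 * pending DMUB notifications (AUX, HPD, etc., optionally offloaded to the
 * delayed_hpd_wq worker) and up to DMUB_TRACE_MAX_READ entries from the
 * outbox0 trace buffer.
 */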

static int dm_set_clockgating_state(void *handle,
                  enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                  enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                            AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                            &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                          int pipe, bool *enabled,
                                          unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {

                if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                        continue;

                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                       struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dc_context *ctx = adev->dm.dc->ctx;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        u32 i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        /* initialize register offsets for ASICs with runtime initialization available */
        if (dmub_srv->hw_funcs.init_reg_offsets)
                dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                                fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
        fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->vm_manager.vram_base_offset;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 4):
        case IP_VERSION(3, 5, 0):
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        u64 pt_base;
        u32 logical_addr_low;
        u32 logical_addr_high;
        u32 agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        /* AGP aperture is disabled */
        if (agp_bot > agp_top) {
                logical_addr_low = adev->gmc.fb_start >> 18;
                if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
                                       AMD_APU_IS_RENOIR |
                                       AMD_APU_IS_GREEN_SARDINE))
                        /*
                         * Raven2 has a HW issue where it is unable to use the
                         * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
                         * So here is the workaround that increases the system
                         * aperture high address (add 1) to get rid of the VM
                         * fault and hardware hang.
                         */
                        logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
                else
                        logical_addr_high = adev->gmc.fb_end >> 18;
        } else {
                logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
                if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
                                       AMD_APU_IS_RENOIR |
                                       AMD_APU_IS_GREEN_SARDINE))
                        /*
                         * Raven2 has a HW issue where it is unable to use the
                         * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
                         * So here is the workaround that increases the system
                         * aperture high address (add 1) to get rid of the VM
                         * fault and hardware hang.
                         */
                        logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
                else
                        logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
        }

        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
                                                   AMDGPU_GPU_PAGE_SHIFT);
        page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
                                                  AMDGPU_GPU_PAGE_SHIFT);
        page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
                                                 AMDGPU_GPU_PAGE_SHIFT);
        page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
                                                AMDGPU_GPU_PAGE_SHIFT);
        page_table_base.high_part = upper_32_bits(pt_base);
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}
1356
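/**
 * force_connector_state() - Override a connector's forced state
 * @aconnector: target connector
 * @force_state: new value for connector->force
 *
 * Updates the forced state under the mode_config mutex, then fires a
 * hotplug event (under the HPD lock) so userspace re-probes the connector.
 * Used below to emulate a disconnect/reconnect cycle when a DP automated
 * test changed the timing.
 */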
1357 static void force_connector_state(
1358         struct amdgpu_dm_connector *aconnector,
1359         enum drm_connector_force force_state)
1360 {
1361         struct drm_connector *connector = &aconnector->base;
1362
1363         mutex_lock(&connector->dev->mode_config.mutex);
1364         aconnector->base.force = force_state;
1365         mutex_unlock(&connector->dev->mode_config.mutex);
1366
1367         mutex_lock(&aconnector->hpd_lock);
1368         drm_kms_helper_connector_hotplug_event(connector);
1369         mutex_unlock(&aconnector->hpd_lock);
1370 }
1371
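/*
 * Deferred bottom half for HPD RX interrupts. Depending on the device
 * service IRQ bits this either forwards MST sideband-message-ready events
 * to the MST manager, services a DP automated test request (ACKing it via
 * DP_TEST_RESPONSE), or re-reads the link status from DPCD and recovers
 * from link loss. The work item is freed on exit.
 */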
1372 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1373 {
1374         struct hpd_rx_irq_offload_work *offload_work;
1375         struct amdgpu_dm_connector *aconnector;
1376         struct dc_link *dc_link;
1377         struct amdgpu_device *adev;
1378         enum dc_connection_type new_connection_type = dc_connection_none;
1379         unsigned long flags;
1380         union test_response test_response;
1381
1382         memset(&test_response, 0, sizeof(test_response));
1383
1384         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1385         aconnector = offload_work->offload_wq->aconnector;
1386
1387         if (!aconnector) {
1388                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1389                 goto skip;
1390         }
1391
1392         adev = drm_to_adev(aconnector->base.dev);
1393         dc_link = aconnector->dc_link;
1394
1395         mutex_lock(&aconnector->hpd_lock);
1396         if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
1397                 DRM_ERROR("KMS: Failed to detect connector\n");
1398         mutex_unlock(&aconnector->hpd_lock);
1399
1400         if (new_connection_type == dc_connection_none)
1401                 goto skip;
1402
1403         if (amdgpu_in_reset(adev))
1404                 goto skip;
1405
1406         if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
1407                 offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
1408                 dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
1409                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1410                 offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
1411                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1412                 goto skip;
1413         }
1414
1415         mutex_lock(&adev->dm.dc_lock);
1416         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1417                 dc_link_dp_handle_automated_test(dc_link);
1418
1419                 if (aconnector->timing_changed) {
1420                         /* force connector disconnect and reconnect */
1421                         force_connector_state(aconnector, DRM_FORCE_OFF);
1422                         msleep(100);
1423                         force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
1424                 }
1425
1426                 test_response.bits.ACK = 1;
1427
1428                 core_link_write_dpcd(dc_link,
1429                                      DP_TEST_RESPONSE,
1430                                      &test_response.raw,
1431                                      sizeof(test_response));
1433         } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1434                         dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
1435                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1436                 /* offload_work->data comes from handle_hpd_rx_irq->
1437                  * schedule_hpd_rx_offload_work, i.e. this is the deferred
1438                  * handler for an HPD short pulse. By this point the link
1439                  * status may have changed, so read the latest link status
1440                  * from the DPCD registers; if the link is still good, skip
1441                  * re-running link training.
1442                  */
1443                 union hpd_irq_data irq_data;
1444
1445                 memset(&irq_data, 0, sizeof(irq_data));
1446
1447                 /* Before dc_link_dp_handle_link_loss, allow a new link-loss
1448                  * handling request to be added to the work queue if the link
1449                  * is lost again at the end of dc_link_dp_handle_link_loss.
1450                  */
1451                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1452                 offload_work->offload_wq->is_handling_link_loss = false;
1453                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1454
1455                 if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
1456                         dc_link_check_link_loss_status(dc_link, &irq_data))
1457                         dc_link_dp_handle_link_loss(dc_link);
1458         }
1459         mutex_unlock(&adev->dm.dc_lock);
1460
1461 skip:
1462         kfree(offload_work);
1463
1464 }
1465
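/*
 * Allocate one single-threaded offload workqueue per DC link, so deferred
 * HPD RX IRQ handling for one link can never stall another. Returns the
 * per-link array, or NULL on allocation failure.
 */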
1466 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1467 {
1468         int max_caps = dc->caps.max_links;
1469         int i = 0;
1470         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1471
1472         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1473
1474         if (!hpd_rx_offload_wq)
1475                 return NULL;
1476
1477
1478         for (i = 0; i < max_caps; i++) {
1479                 hpd_rx_offload_wq[i].wq =
1480                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1481
1482                 if (hpd_rx_offload_wq[i].wq == NULL) {
1483                         DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
1484                         goto out_err;
1485                 }
1486
1487                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1488         }
1489
1490         return hpd_rx_offload_wq;
1491
1492 out_err:
1493         for (i = 0; i < max_caps; i++) {
1494                 if (hpd_rx_offload_wq[i].wq)
1495                         destroy_workqueue(hpd_rx_offload_wq[i].wq);
1496         }
1497         kfree(hpd_rx_offload_wq);
1498         return NULL;
1499 }
1500
1501 struct amdgpu_stutter_quirk {
1502         u16 chip_vendor;
1503         u16 chip_device;
1504         u16 subsys_vendor;
1505         u16 subsys_device;
1506         u8 revision;
1507 };
1508
1509 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1510         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1511         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1512         { 0, 0, 0, 0, 0 },
1513 };
1514
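/*
 * Return true if this exact board (chip vendor/device plus subsystem IDs
 * and revision) is on the stutter quirk list above and stutter mode must
 * therefore be disabled.
 */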
1515 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1516 {
1517         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1518
1519         while (p && p->chip_device != 0) {
1520                 if (pdev->vendor == p->chip_vendor &&
1521                     pdev->device == p->chip_device &&
1522                     pdev->subsystem_vendor == p->subsys_vendor &&
1523                     pdev->subsystem_device == p->subsys_device &&
1524                     pdev->revision == p->revision) {
1525                         return true;
1526                 }
1527                 ++p;
1528         }
1529         return false;
1530 }
1531
1532 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1533         {
1534                 .matches = {
1535                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1536                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1537                 },
1538         },
1539         {
1540                 .matches = {
1541                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1542                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1543                 },
1544         },
1545         {
1546                 .matches = {
1547                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1548                         DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1549                 },
1550         },
1551         {
1552                 .matches = {
1553                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1554                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1555                 },
1556         },
1557         {
1558                 .matches = {
1559                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1560                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1561                 },
1562         },
1563         {
1564                 .matches = {
1565                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1566                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1567                 },
1568         },
1569         {
1570                 .matches = {
1571                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1572                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1573                 },
1574         },
1575         {
1576                 .matches = {
1577                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1578                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1579                 },
1580         },
1581         {
1582                 .matches = {
1583                         DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1584                         DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1585                 },
1586         },
1587         {}
1588         /* TODO: refactor this from a fixed table to a dynamic option */
1589 };
1590
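/*
 * Enable the AUX/HPD-disconnect quirk on platforms matching the DMI table
 * above; all other systems keep the default behavior.
 */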
1591 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1592 {
1593         const struct dmi_system_id *dmi_id;
1594
1595         dm->aux_hpd_discon_quirk = false;
1596
1597         dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1598         if (dmi_id) {
1599                 dm->aux_hpd_discon_quirk = true;
1600                 DRM_INFO("aux_hpd_discon_quirk attached\n");
1601         }
1602 }
1603
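/**
 * amdgpu_dm_init() - Create and initialize the display manager
 * @adev: amdgpu device
 *
 * Builds dc_init_data from the ASIC, firmware and module-parameter state,
 * creates the DC instance, brings up DMUB, and then creates the supporting
 * pieces: HPD RX offload queues, the freesync and color management modules,
 * the HDCP and vblank-control workqueues, DMUB notification callbacks, and
 * finally the DRM-facing KMS objects. Any failure unwinds through
 * amdgpu_dm_fini() and returns -EINVAL.
 */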
1604 static int amdgpu_dm_init(struct amdgpu_device *adev)
1605 {
1606         struct dc_init_data init_data;
1607         struct dc_callback_init init_params;
1608         int r;
1609
1610         adev->dm.ddev = adev_to_drm(adev);
1611         adev->dm.adev = adev;
1612
1613         /* Zero all the fields */
1614         memset(&init_data, 0, sizeof(init_data));
1615         memset(&init_params, 0, sizeof(init_params));
1616
1617         mutex_init(&adev->dm.dpia_aux_lock);
1618         mutex_init(&adev->dm.dc_lock);
1619         mutex_init(&adev->dm.audio_lock);
1620
1621         if (amdgpu_dm_irq_init(adev)) {
1622                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1623                 goto error;
1624         }
1625
1626         init_data.asic_id.chip_family = adev->family;
1627
1628         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1629         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1630         init_data.asic_id.chip_id = adev->pdev->device;
1631
1632         init_data.asic_id.vram_width = adev->gmc.vram_width;
1633         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1634         init_data.asic_id.atombios_base_address =
1635                 adev->mode_info.atom_context->bios;
1636
1637         init_data.driver = adev;
1638
1639         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1640
1641         if (!adev->dm.cgs_device) {
1642                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1643                 goto error;
1644         }
1645
1646         init_data.cgs_device = adev->dm.cgs_device;
1647
1648         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1649
1650         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
1651         case IP_VERSION(2, 1, 0):
1652                 switch (adev->dm.dmcub_fw_version) {
1653                 case 0: /* development */
1654                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1655                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1656                         init_data.flags.disable_dmcu = false;
1657                         break;
1658                 default:
1659                         init_data.flags.disable_dmcu = true;
1660                 }
1661                 break;
1662         case IP_VERSION(2, 0, 3):
1663                 init_data.flags.disable_dmcu = true;
1664                 break;
1665         default:
1666                 break;
1667         }
1668
1669         /* APUs support S/G display by default, except:
1670          * - ASICs before Carrizo,
1671          * - RAVEN1 (users reported stability issues)
1672          */
1673
1674         if (adev->asic_type < CHIP_CARRIZO) {
1675                 init_data.flags.gpu_vm_support = false;
1676         } else if (adev->asic_type == CHIP_RAVEN) {
1677                 if (adev->apu_flags & AMD_APU_IS_RAVEN)
1678                         init_data.flags.gpu_vm_support = false;
1679                 else
1680                         init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
1681         } else {
1682                 init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
1683         }
1684
1685         adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
1686
1687         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1688                 init_data.flags.fbc_support = true;
1689
1690         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1691                 init_data.flags.multi_mon_pp_mclk_switch = true;
1692
1693         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1694                 init_data.flags.disable_fractional_pwm = true;
1695
1696         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1697                 init_data.flags.edp_no_power_sequencing = true;
1698
1699         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1700                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1701         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1702                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1703
1704         init_data.flags.seamless_boot_edp_requested = false;
1705
1706         if (amdgpu_device_seamless_boot_supported(adev)) {
1707                 init_data.flags.seamless_boot_edp_requested = true;
1708                 init_data.flags.allow_seamless_boot_optimization = true;
1709                 DRM_INFO("Seamless boot condition check passed\n");
1710         }
1711
1712         init_data.flags.enable_mipi_converter_optimization = true;
1713
1714         init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1715         init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1716         init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
1717
1718         init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
1719
1720         /* Enable DWB for tested platforms only */
1721         if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
1722                 init_data.num_virtual_links = 1;
1723
1724         INIT_LIST_HEAD(&adev->dm.da_list);
1725
1726         retrieve_dmi_info(&adev->dm);
1727
1728         /* Display Core create. */
1729         adev->dm.dc = dc_create(&init_data);
1730
1731         if (adev->dm.dc) {
1732                 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
1733                          dce_version_to_string(adev->dm.dc->ctx->dce_version));
1734         } else {
1735                 DRM_INFO("Display Core v%s failed to initialize!\n", DC_VER);
1736                 goto error;
1737         }
1738
1739         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1740                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1741                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1742         }
1743
1744         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1745                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1746         if (dm_should_disable_stutter(adev->pdev))
1747                 adev->dm.dc->debug.disable_stutter = true;
1748
1749         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1750                 adev->dm.dc->debug.disable_stutter = true;
1751
1752         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1753                 adev->dm.dc->debug.disable_dsc = true;
1754
1755         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1756                 adev->dm.dc->debug.disable_clock_gate = true;
1757
1758         if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1759                 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1760
1761         adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1762
1763         /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1764         adev->dm.dc->debug.ignore_cable_id = true;
1765
1766         if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
1767                 DRM_INFO("DP-HDMI FRL PCON supported\n");
1768
1769         r = dm_dmub_hw_init(adev);
1770         if (r) {
1771                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1772                 goto error;
1773         }
1774
1775         dc_hardware_init(adev->dm.dc);
1776
1777         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1778         if (!adev->dm.hpd_rx_offload_wq) {
1779                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1780                 goto error;
1781         }
1782
1783         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1784                 struct dc_phy_addr_space_config pa_config;
1785
1786                 mmhub_read_system_context(adev, &pa_config);
1787
1788                 // Call the DC init_memory func
1789                 dc_setup_system_context(adev->dm.dc, &pa_config);
1790         }
1791
1792         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1793         if (!adev->dm.freesync_module) {
1794                 DRM_ERROR(
1795                 "amdgpu: failed to initialize freesync_module.\n");
1796         } else
1797                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1798                                 adev->dm.freesync_module);
1799
1800         amdgpu_dm_init_color_mod();
1801
1802         if (adev->dm.dc->caps.max_links > 0) {
1803                 adev->dm.vblank_control_workqueue =
1804                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1805                 if (!adev->dm.vblank_control_workqueue)
1806                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1807         }
1808
1809         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1810                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1811
1812                 if (!adev->dm.hdcp_workqueue)
1813                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1814                 else
1815                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1816
1817                 dc_init_callbacks(adev->dm.dc, &init_params);
1818         }
1819         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1820                 init_completion(&adev->dm.dmub_aux_transfer_done);
1821                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1822                 if (!adev->dm.dmub_notify) {
1823                         DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1824                         goto error;
1825                 }
1826
1827                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1828                 if (!adev->dm.delayed_hpd_wq) {
1829                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1830                         goto error;
1831                 }
1832
1833                 amdgpu_dm_outbox_init(adev);
1834                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1835                         dmub_aux_setconfig_callback, false)) {
1836                         DRM_ERROR("amdgpu: failed to register dmub aux callback");
1837                         goto error;
1838                 }
1839                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1840                         DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1841                         goto error;
1842                 }
1843                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1844                         DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1845                         goto error;
1846                 }
1847         }
1848
1849         /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1850          * It is expected that DMUB will resend any pending notifications at this point, for
1851          * example HPD from DPIA.
1852          */
1853         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1854                 dc_enable_dmub_outbox(adev->dm.dc);
1855
1856                 /* DPIA trace goes to dmesg logs only if outbox is enabled */
1857                 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
1858                         dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
1859         }
1860
1861         if (amdgpu_dm_initialize_drm_device(adev)) {
1862                 DRM_ERROR(
1863                 "amdgpu: failed to initialize sw for display support.\n");
1864                 goto error;
1865         }
1866
1867         /* create fake encoders for MST */
1868         dm_dp_create_fake_mst_encoders(adev);
1869
1870         /* TODO: Add_display_info? */
1871
1872         /* TODO use dynamic cursor width */
1873         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1874         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1875
1876         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1877                 DRM_ERROR(
1878                 "amdgpu: failed to initialize sw for display support.\n");
1879                 goto error;
1880         }
1881
1882 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1883         adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
1884         if (!adev->dm.secure_display_ctxs)
1885                 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
1886 #endif
1887
1888         DRM_DEBUG_DRIVER("KMS initialized.\n");
1889
1890         return 0;
1891 error:
1892         amdgpu_dm_fini(adev);
1893
1894         return -EINVAL;
1895 }
1896
1897 static int amdgpu_dm_early_fini(void *handle)
1898 {
1899         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1900
1901         amdgpu_dm_audio_fini(adev);
1902
1903         return 0;
1904 }
1905
1906 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1907 {
1908         int i;
1909
1910         if (adev->dm.vblank_control_workqueue) {
1911                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1912                 adev->dm.vblank_control_workqueue = NULL;
1913         }
1914
1915         amdgpu_dm_destroy_drm_device(&adev->dm);
1916
1917 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1918         if (adev->dm.secure_display_ctxs) {
1919                 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1920                         if (adev->dm.secure_display_ctxs[i].crtc) {
1921                                 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
1922                                 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
1923                         }
1924                 }
1925                 kfree(adev->dm.secure_display_ctxs);
1926                 adev->dm.secure_display_ctxs = NULL;
1927         }
1928 #endif
1929         if (adev->dm.hdcp_workqueue) {
1930                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1931                 adev->dm.hdcp_workqueue = NULL;
1932         }
1933
1934         if (adev->dm.dc)
1935                 dc_deinit_callbacks(adev->dm.dc);
1936
1937         if (adev->dm.dc)
1938                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1939
1940         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1941                 kfree(adev->dm.dmub_notify);
1942                 adev->dm.dmub_notify = NULL;
1943                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1944                 adev->dm.delayed_hpd_wq = NULL;
1945         }
1946
1947         if (adev->dm.dmub_bo)
1948                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1949                                       &adev->dm.dmub_bo_gpu_addr,
1950                                       &adev->dm.dmub_bo_cpu_addr);
1951
1952         if (adev->dm.hpd_rx_offload_wq) {
1953                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1954                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1955                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1956                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1957                         }
1958                 }
1959
1960                 kfree(adev->dm.hpd_rx_offload_wq);
1961                 adev->dm.hpd_rx_offload_wq = NULL;
1962         }
1963
1964         /* DC Destroy TODO: Replace destroy DAL */
1965         if (adev->dm.dc)
1966                 dc_destroy(&adev->dm.dc);
1967         /*
1968          * TODO: pageflip, vblank interrupt
1969          *
1970          * amdgpu_dm_irq_fini(adev);
1971          */
1972
1973         if (adev->dm.cgs_device) {
1974                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1975                 adev->dm.cgs_device = NULL;
1976         }
1977         if (adev->dm.freesync_module) {
1978                 mod_freesync_destroy(adev->dm.freesync_module);
1979                 adev->dm.freesync_module = NULL;
1980         }
1981
1982         mutex_destroy(&adev->dm.audio_lock);
1983         mutex_destroy(&adev->dm.dc_lock);
1984         mutex_destroy(&adev->dm.dpia_aux_lock);
1985 }
1986
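/*
 * Request the DMCU firmware for ASICs that need it and register it for PSP
 * loading. ASICs without a DMCU (or whose ABM lives in DMCUB) return 0
 * early, and a missing firmware file is tolerated since DMCU is optional.
 */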
1987 static int load_dmcu_fw(struct amdgpu_device *adev)
1988 {
1989         const char *fw_name_dmcu = NULL;
1990         int r;
1991         const struct dmcu_firmware_header_v1_0 *hdr;
1992
1993         switch (adev->asic_type) {
1994 #if defined(CONFIG_DRM_AMD_DC_SI)
1995         case CHIP_TAHITI:
1996         case CHIP_PITCAIRN:
1997         case CHIP_VERDE:
1998         case CHIP_OLAND:
1999 #endif
2000         case CHIP_BONAIRE:
2001         case CHIP_HAWAII:
2002         case CHIP_KAVERI:
2003         case CHIP_KABINI:
2004         case CHIP_MULLINS:
2005         case CHIP_TONGA:
2006         case CHIP_FIJI:
2007         case CHIP_CARRIZO:
2008         case CHIP_STONEY:
2009         case CHIP_POLARIS11:
2010         case CHIP_POLARIS10:
2011         case CHIP_POLARIS12:
2012         case CHIP_VEGAM:
2013         case CHIP_VEGA10:
2014         case CHIP_VEGA12:
2015         case CHIP_VEGA20:
2016                 return 0;
2017         case CHIP_NAVI12:
2018                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
2019                 break;
2020         case CHIP_RAVEN:
2021                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
2022                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2023                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
2024                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2025                 else
2026                         return 0;
2027                 break;
2028         default:
2029                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2030                 case IP_VERSION(2, 0, 2):
2031                 case IP_VERSION(2, 0, 3):
2032                 case IP_VERSION(2, 0, 0):
2033                 case IP_VERSION(2, 1, 0):
2034                 case IP_VERSION(3, 0, 0):
2035                 case IP_VERSION(3, 0, 2):
2036                 case IP_VERSION(3, 0, 3):
2037                 case IP_VERSION(3, 0, 1):
2038                 case IP_VERSION(3, 1, 2):
2039                 case IP_VERSION(3, 1, 3):
2040                 case IP_VERSION(3, 1, 4):
2041                 case IP_VERSION(3, 1, 5):
2042                 case IP_VERSION(3, 1, 6):
2043                 case IP_VERSION(3, 2, 0):
2044                 case IP_VERSION(3, 2, 1):
2045                 case IP_VERSION(3, 5, 0):
2046                         return 0;
2047                 default:
2048                         break;
2049                 }
2050                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2051                 return -EINVAL;
2052         }
2053
2054         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2055                 DRM_DEBUG_KMS("dm: DMCU firmware not supported with direct or SMU loading\n");
2056                 return 0;
2057         }
2058
2059         r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
2060         if (r == -ENODEV) {
2061                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
2062                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2063                 adev->dm.fw_dmcu = NULL;
2064                 return 0;
2065         }
2066         if (r) {
2067                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
2068                         fw_name_dmcu);
2069                 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2070                 return r;
2071         }
2072
2073         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2074         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
2075         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2076         adev->firmware.fw_size +=
2077                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2078
2079         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2080         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2081         adev->firmware.fw_size +=
2082                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2083
2084         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2085
2086         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2087
2088         return 0;
2089 }
2090
2091 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2092 {
2093         struct amdgpu_device *adev = ctx;
2094
2095         return dm_read_reg(adev->dm.dc->ctx, address);
2096 }
2097
2098 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2099                                      uint32_t value)
2100 {
2101         struct amdgpu_device *adev = ctx;
2102
2103         return dm_write_reg(adev->dm.dc->ctx, address, value);
2104 }
2105
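/*
 * Software-side DMUB setup: map the DCE IP version to a DMUB ASIC id,
 * register the firmware for PSP loading, create the DMUB service with the
 * register read/write callbacks above, size its memory regions from the
 * firmware header, back them with a VRAM/GTT buffer object, and record the
 * resulting layout in adev->dm.dmub_fb_info for the hardware init step.
 */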
2106 static int dm_dmub_sw_init(struct amdgpu_device *adev)
2107 {
2108         struct dmub_srv_create_params create_params;
2109         struct dmub_srv_region_params region_params;
2110         struct dmub_srv_region_info region_info;
2111         struct dmub_srv_memory_params memory_params;
2112         struct dmub_srv_fb_info *fb_info;
2113         struct dmub_srv *dmub_srv;
2114         const struct dmcub_firmware_header_v1_0 *hdr;
2115         enum dmub_asic dmub_asic;
2116         enum dmub_status status;
2117         int r;
2118
2119         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2120         case IP_VERSION(2, 1, 0):
2121                 dmub_asic = DMUB_ASIC_DCN21;
2122                 break;
2123         case IP_VERSION(3, 0, 0):
2124                 dmub_asic = DMUB_ASIC_DCN30;
2125                 break;
2126         case IP_VERSION(3, 0, 1):
2127                 dmub_asic = DMUB_ASIC_DCN301;
2128                 break;
2129         case IP_VERSION(3, 0, 2):
2130                 dmub_asic = DMUB_ASIC_DCN302;
2131                 break;
2132         case IP_VERSION(3, 0, 3):
2133                 dmub_asic = DMUB_ASIC_DCN303;
2134                 break;
2135         case IP_VERSION(3, 1, 2):
2136         case IP_VERSION(3, 1, 3):
2137                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2138                 break;
2139         case IP_VERSION(3, 1, 4):
2140                 dmub_asic = DMUB_ASIC_DCN314;
2141                 break;
2142         case IP_VERSION(3, 1, 5):
2143                 dmub_asic = DMUB_ASIC_DCN315;
2144                 break;
2145         case IP_VERSION(3, 1, 6):
2146                 dmub_asic = DMUB_ASIC_DCN316;
2147                 break;
2148         case IP_VERSION(3, 2, 0):
2149                 dmub_asic = DMUB_ASIC_DCN32;
2150                 break;
2151         case IP_VERSION(3, 2, 1):
2152                 dmub_asic = DMUB_ASIC_DCN321;
2153                 break;
2154         case IP_VERSION(3, 5, 0):
2155                 dmub_asic = DMUB_ASIC_DCN35;
2156                 break;
2157         default:
2158                 /* ASIC doesn't support DMUB. */
2159                 return 0;
2160         }
2161
2162         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2163         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2164
2165         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2166                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2167                         AMDGPU_UCODE_ID_DMCUB;
2168                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2169                         adev->dm.dmub_fw;
2170                 adev->firmware.fw_size +=
2171                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2172
2173                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2174                          adev->dm.dmcub_fw_version);
2175         }
2176
2177
2178         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2179         dmub_srv = adev->dm.dmub_srv;
2180
2181         if (!dmub_srv) {
2182                 DRM_ERROR("Failed to allocate DMUB service!\n");
2183                 return -ENOMEM;
2184         }
2185
2186         memset(&create_params, 0, sizeof(create_params));
2187         create_params.user_ctx = adev;
2188         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2189         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2190         create_params.asic = dmub_asic;
2191
2192         /* Create the DMUB service. */
2193         status = dmub_srv_create(dmub_srv, &create_params);
2194         if (status != DMUB_STATUS_OK) {
2195                 DRM_ERROR("Error creating DMUB service: %d\n", status);
2196                 return -EINVAL;
2197         }
2198
2199         /* Calculate the size of all the regions for the DMUB service. */
2200         memset(&region_params, 0, sizeof(region_params));
2201
2202         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2203                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2204         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2205         region_params.vbios_size = adev->bios_size;
2206         region_params.fw_bss_data = region_params.bss_data_size ?
2207                 adev->dm.dmub_fw->data +
2208                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2209                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2210         region_params.fw_inst_const =
2211                 adev->dm.dmub_fw->data +
2212                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2213                 PSP_HEADER_BYTES;
2214         region_params.is_mailbox_in_inbox = false;
2215
2216         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2217                                            &region_info);
2218
2219         if (status != DMUB_STATUS_OK) {
2220                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2221                 return -EINVAL;
2222         }
2223
2224         /*
2225          * Allocate a framebuffer based on the total size of all the regions.
2226          * TODO: Move this into GART.
2227          */
2228         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2229                                     AMDGPU_GEM_DOMAIN_VRAM |
2230                                     AMDGPU_GEM_DOMAIN_GTT,
2231                                     &adev->dm.dmub_bo,
2232                                     &adev->dm.dmub_bo_gpu_addr,
2233                                     &adev->dm.dmub_bo_cpu_addr);
2234         if (r)
2235                 return r;
2236
2237         /* Rebase the regions on the framebuffer address. */
2238         memset(&memory_params, 0, sizeof(memory_params));
2239         memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
2240         memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
2241         memory_params.region_info = &region_info;
2242
2243         adev->dm.dmub_fb_info =
2244                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2245         fb_info = adev->dm.dmub_fb_info;
2246
2247         if (!fb_info) {
2248                 DRM_ERROR(
2249                         "Failed to allocate framebuffer info for DMUB service!\n");
2250                 return -ENOMEM;
2251         }
2252
2253         status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
2254         if (status != DMUB_STATUS_OK) {
2255                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2256                 return -EINVAL;
2257         }
2258
2259         return 0;
2260 }
2261
2262 static int dm_sw_init(void *handle)
2263 {
2264         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2265         int r;
2266
2267         r = dm_dmub_sw_init(adev);
2268         if (r)
2269                 return r;
2270
2271         return load_dmcu_fw(adev);
2272 }
2273
2274 static int dm_sw_fini(void *handle)
2275 {
2276         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2277
2278         kfree(adev->dm.dmub_fb_info);
2279         adev->dm.dmub_fb_info = NULL;
2280
2281         if (adev->dm.dmub_srv) {
2282                 dmub_srv_destroy(adev->dm.dmub_srv);
2283                 adev->dm.dmub_srv = NULL;
2284         }
2285
2286         amdgpu_ucode_release(&adev->dm.dmub_fw);
2287         amdgpu_ucode_release(&adev->dm.fw_dmcu);
2288
2289         return 0;
2290 }
2291
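/*
 * Start MST topology management on every connector whose DC link was
 * detected as an MST branch. If enabling MST fails, the link is demoted to
 * a single-stream connection and the topology manager is stopped.
 */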
2292 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2293 {
2294         struct amdgpu_dm_connector *aconnector;
2295         struct drm_connector *connector;
2296         struct drm_connector_list_iter iter;
2297         int ret = 0;
2298
2299         drm_connector_list_iter_begin(dev, &iter);
2300         drm_for_each_connector_iter(connector, &iter) {
2301
2302                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2303                         continue;
2304
2305                 aconnector = to_amdgpu_dm_connector(connector);
2306                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2307                     aconnector->mst_mgr.aux) {
2308                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2309                                          aconnector,
2310                                          aconnector->base.base.id);
2311
2312                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2313                         if (ret < 0) {
2314                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2315                                 aconnector->dc_link->type =
2316                                         dc_connection_single;
2317                                 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2318                                                                      aconnector->dc_link);
2319                                 break;
2320                         }
2321                 }
2322         }
2323         drm_connector_list_iter_end(&iter);
2324
2325         return ret;
2326 }
2327
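/*
 * Late init programs the ABM (adaptive backlight) IRAM parameters: a linear
 * 16-entry backlight LUT (linear_lut[i] = 0xFFFF * i / 15, so entry 0 is 0
 * and entry 15 is 0xFFFF), ramping defaults, and a 1% minimum level. Older
 * ASICs load these through the DMCU, newer ones (ABM 2.4+) through DMCUB
 * per eDP link. Finally, MST links are detected on all connectors.
 */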
2328 static int dm_late_init(void *handle)
2329 {
2330         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2331
2332         struct dmcu_iram_parameters params;
2333         unsigned int linear_lut[16];
2334         int i;
2335         struct dmcu *dmcu = NULL;
2336
2337         dmcu = adev->dm.dc->res_pool->dmcu;
2338
2339         for (i = 0; i < 16; i++)
2340                 linear_lut[i] = 0xFFFF * i / 15;
2341
2342         params.set = 0;
2343         params.backlight_ramping_override = false;
2344         params.backlight_ramping_start = 0xCCCC;
2345         params.backlight_ramping_reduction = 0xCCCCCCCC;
2346         params.backlight_lut_array_size = 16;
2347         params.backlight_lut_array = linear_lut;
2348
2349         /* Minimum backlight level after ABM reduction; don't allow it to go
2350          * below 1%: 0xFFFF * 0.01 = 0x28F
2351          */
2352         params.min_abm_backlight = 0x28F;
2353         /* When ABM is implemented on DMCUB, the dmcu object
2354          * will be NULL.
2355          * ABM 2.4 and up are implemented on DMCUB.
2356          */
2357         if (dmcu) {
2358                 if (!dmcu_load_iram(dmcu, params))
2359                         return -EINVAL;
2360         } else if (adev->dm.dc->ctx->dmub_srv) {
2361                 struct dc_link *edp_links[MAX_NUM_EDP];
2362                 int edp_num;
2363
2364                 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2365                 for (i = 0; i < edp_num; i++) {
2366                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2367                                 return -EINVAL;
2368                 }
2369         }
2370
2371         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2372 }
2373
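/*
 * Minimal re-initialization of an MST branch after resume: re-read the DPCD
 * caps, re-enable MST and up-request handling, and restore the branch GUID.
 * Some hubs lose their GUID across suspend, in which case a fresh one
 * (derived from jiffies) is written back.
 */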
2374 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2375 {
2376         int ret;
2377         u8 guid[16];
2378         u64 tmp64;
2379
2380         mutex_lock(&mgr->lock);
2381         if (!mgr->mst_primary)
2382                 goto out_fail;
2383
2384         if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2385                 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2386                 goto out_fail;
2387         }
2388
2389         ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2390                                  DP_MST_EN |
2391                                  DP_UP_REQ_EN |
2392                                  DP_UPSTREAM_IS_SRC);
2393         if (ret < 0) {
2394                 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2395                 goto out_fail;
2396         }
2397
2398         /* Some hubs forget their guids after they resume */
2399         ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2400         if (ret != 16) {
2401                 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2402                 goto out_fail;
2403         }
2404
2405         if (memchr_inv(guid, 0, 16) == NULL) {
2406                 tmp64 = get_jiffies_64();
2407                 memcpy(&guid[0], &tmp64, sizeof(u64));
2408                 memcpy(&guid[8], &tmp64, sizeof(u64));
2409
2410                 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
2411
2412                 if (ret != 16) {
2413                         drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2414                         goto out_fail;
2415                 }
2416         }
2417
2418         memcpy(mgr->mst_primary->guid, guid, 16);
2419
2420 out_fail:
2421         mutex_unlock(&mgr->lock);
2422 }
2423
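/*
 * Suspend/resume handling for MST topology managers. On suspend each
 * manager is quiesced; on resume the AUX timeout is tuned for LTTPR and the
 * branch status is restored (full topology probing happens later in the
 * resume sequence).
 */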
2424 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2425 {
2426         struct amdgpu_dm_connector *aconnector;
2427         struct drm_connector *connector;
2428         struct drm_connector_list_iter iter;
2429         struct drm_dp_mst_topology_mgr *mgr;
2430
2431         drm_connector_list_iter_begin(dev, &iter);
2432         drm_for_each_connector_iter(connector, &iter) {
2433
2434                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2435                         continue;
2436
2437                 aconnector = to_amdgpu_dm_connector(connector);
2438                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2439                     aconnector->mst_root)
2440                         continue;
2441
2442                 mgr = &aconnector->mst_mgr;
2443
2444                 if (suspend) {
2445                         drm_dp_mst_topology_mgr_suspend(mgr);
2446                 } else {
2447                         /* If an extended timeout is supported in hardware, default to the
2448                          * LTTPR timeout (3.2 ms) first as a W/A for the DP link-layer
2449                          * CTS 4.2.1.1 regression introduced by the CTS spec requirement update.
2450                          */
2451                         try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2452                         if (!dp_is_lttpr_present(aconnector->dc_link))
2453                                 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2454
2455                         /* TODO: move resume_mst_branch_status() back into DRM MST resume
2456                          * once the topology probing work is pulled out of MST resume into a
2457                          * second MST resume step. That second step should be called after the
2458                          * old state has been restored (i.e. after drm_atomic_helper_resume()).
2459                          */
2460                         resume_mst_branch_status(mgr);
2461                 }
2462         }
2463         drm_connector_list_iter_end(&iter);
2464 }
2465
2466 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2467 {
2468         int ret = 0;
2469
2470         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2471          * depends on the Windows driver dc implementation.
2472          * For Navi1x, the clock settings of the dcn watermarks are fixed; the
2473          * settings should be passed to smu during boot up and on resume from s3.
2474          * Boot up: dc calculates the dcn watermark clock settings within
2475          * dc_create / dcn20_resource_construct,
2476          * then calls the pplib functions below to pass the settings to smu:
2477          * smu_set_watermarks_for_clock_ranges
2478          * smu_set_watermarks_table
2479          * navi10_set_watermarks_table
2480          * smu_write_watermarks_table
2481          *
2482          * For Renoir, the clock settings of the dcn watermarks are also fixed
2483          * values. dc implements a different flow for the Windows driver:
2484          * dc_hardware_init / dc_set_power_state
2485          * dcn10_init_hw
2486          * notify_wm_ranges
2487          * set_wm_ranges
2488          * -- Linux
2489          * smu_set_watermarks_for_clock_ranges
2490          * renoir_set_watermarks_table
2491          * smu_write_watermarks_table
2492          *
2493          * For Linux:
2494          * dc_hardware_init -> amdgpu_dm_init
2495          * dc_set_power_state --> dm_resume
2496          *
2497          * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2498          *
2499          */
2500         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
2501         case IP_VERSION(2, 0, 2):
2502         case IP_VERSION(2, 0, 0):
2503                 break;
2504         default:
2505                 return 0;
2506         }
2507
2508         ret = amdgpu_dpm_write_watermarks_table(adev);
2509         if (ret) {
2510                 DRM_ERROR("Failed to update WMTABLE!\n");
2511                 return ret;
2512         }
2513
2514         return 0;
2515 }
2516
2517 /**
2518  * dm_hw_init() - Initialize DC device
2519  * @handle: The base driver device containing the amdgpu_dm device.
2520  *
2521  * Initialize the &struct amdgpu_display_manager device. This involves calling
2522  * the initializers of each DM component, then populating the struct with them.
2523  *
2524  * Although the function implies hardware initialization, both hardware and
2525  * software are initialized here. Splitting them out to their relevant init
2526  * hooks is a future TODO item.
2527  *
2528  * Some notable things that are initialized here:
2529  *
2530  * - Display Core, both software and hardware
2531  * - DC modules that we need (freesync and color management)
2532  * - DRM software states
2533  * - Interrupt sources and handlers
2534  * - Vblank support
2535  * - Debug FS entries, if enabled
2536  */
2537 static int dm_hw_init(void *handle)
2538 {
2539         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2540         /* Create DAL display manager */
2541         amdgpu_dm_init(adev);
2542         amdgpu_dm_hpd_init(adev);
2543
2544         return 0;
2545 }
2546
2547 /**
2548  * dm_hw_fini() - Teardown DC device
2549  * @handle: The base driver device containing the amdgpu_dm device.
2550  *
2551  * Teardown components within &struct amdgpu_display_manager that require
2552  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2553  * were loaded. Also flush IRQ workqueues and disable them.
2554  */
2555 static int dm_hw_fini(void *handle)
2556 {
2557         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2558
2559         amdgpu_dm_hpd_fini(adev);
2560
2561         amdgpu_dm_irq_fini(adev);
2562         amdgpu_dm_fini(adev);
2563         return 0;
2564 }
2565
2566
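/*
 * Enable or disable the pflip, vupdate and vblank interrupt sources for
 * every stream that has active planes. Used to quiesce display interrupts
 * around GPU reset; vblank is toggled directly to avoid changing the
 * amdgpu IRQ refcounts.
 */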
2567 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2568                                  struct dc_state *state, bool enable)
2569 {
2570         enum dc_irq_source irq_source;
2571         struct amdgpu_crtc *acrtc;
2572         int rc = -EBUSY;
2573         int i = 0;
2574
2575         for (i = 0; i < state->stream_count; i++) {
2576                 acrtc = get_crtc_by_otg_inst(
2577                                 adev, state->stream_status[i].primary_otg_inst);
2578
2579                 if (acrtc && state->stream_status[i].plane_count != 0) {
2580                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2581                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2582                         if (rc)
2583                                 DRM_WARN("Failed to %s pflip interrupts\n",
2584                                          enable ? "enable" : "disable");
2585
2586                         if (enable) {
2587                                 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
2588                                         rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
2589                         } else
2590                                 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
2591
2592                         if (rc)
2593                                 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
2594
2595                         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2596                         /* During gpu-reset we disable and then enable vblank irq, so
2597                          * don't use amdgpu_irq_get/put() to avoid refcount change.
2598                          */
2599                         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
2600                                 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
2601                 }
2602         }
2603
2604 }
2605
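/*
 * Commit an empty configuration: copy the current DC state, strip all
 * planes and streams from the copy, and commit it so the hardware ends up
 * driving zero streams (used on the GPU-reset suspend path below).
 */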
2606 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2607 {
2608         struct dc_state *context = NULL;
2609         enum dc_status res = DC_ERROR_UNEXPECTED;
2610         int i;
2611         struct dc_stream_state *del_streams[MAX_PIPES];
2612         int del_streams_count = 0;
2613
2614         memset(del_streams, 0, sizeof(del_streams));
2615
2616         context = dc_state_create_current_copy(dc);
2617         if (context == NULL)
2618                 goto context_alloc_fail;
2619
2620         /* First remove from context all streams */
2621         for (i = 0; i < context->stream_count; i++) {
2622                 struct dc_stream_state *stream = context->streams[i];
2623
2624                 del_streams[del_streams_count++] = stream;
2625         }
2626
2627         /* Remove all planes for removed streams and then remove the streams */
2628         for (i = 0; i < del_streams_count; i++) {
2629                 if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2630                         res = DC_FAIL_DETACH_SURFACES;
2631                         goto fail;
2632                 }
2633
2634                 res = dc_state_remove_stream(dc, context, del_streams[i]);
2635                 if (res != DC_OK)
2636                         goto fail;
2637         }
2638
2639         res = dc_commit_streams(dc, context->streams, context->stream_count);
2640
2641 fail:
2642         dc_state_release(context);
2643
2644 context_alloc_fail:
2645         return res;
2646 }
2647
2648 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2649 {
2650         int i;
2651
2652         if (dm->hpd_rx_offload_wq) {
2653                 for (i = 0; i < dm->dc->caps.max_links; i++)
2654                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2655         }
2656 }
2657
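/*
 * Suspend entry point. During GPU reset the current DC state is cached and
 * the streams are torn down directly; on a regular suspend the atomic state
 * is saved through the DRM helpers, MST is suspended, interrupts are
 * quiesced, and DC and DMUB are put into D3.
 */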
2658 static int dm_suspend(void *handle)
2659 {
2660         struct amdgpu_device *adev = handle;
2661         struct amdgpu_display_manager *dm = &adev->dm;
2662         int ret = 0;
2663
2664         if (amdgpu_in_reset(adev)) {
2665                 mutex_lock(&dm->dc_lock);
2666
2667                 dc_allow_idle_optimizations(adev->dm.dc, false);
2668
2669                 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
2670
2671                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2672
2673                 amdgpu_dm_commit_zero_streams(dm->dc);
2674
2675                 amdgpu_dm_irq_suspend(adev);
2676
2677                 hpd_rx_irq_work_suspend(dm);
2678
2679                 return ret;
2680         }
2681
2682         WARN_ON(adev->dm.cached_state);
2683         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2684         if (IS_ERR(adev->dm.cached_state))
2685                 return PTR_ERR(adev->dm.cached_state);
2686
2687         s3_handle_mst(adev_to_drm(adev), true);
2688
2689         amdgpu_dm_irq_suspend(adev);
2690
2691         hpd_rx_irq_work_suspend(dm);
2692
2693         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2694         dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
2695
2696         return 0;
2697 }
2698
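/**
 * amdgpu_dm_find_first_crtc_matching_connector() - Find a connector on a CRTC
 * @state: atomic state to scan
 * @crtc: CRTC to match against
 *
 * Returns the first connector in @state whose new connector state is bound
 * to @crtc, or NULL if there is none.
 */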
2699 struct drm_connector *
2700 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2701                                              struct drm_crtc *crtc)
2702 {
2703         u32 i;
2704         struct drm_connector_state *new_con_state;
2705         struct drm_connector *connector;
2706         struct drm_crtc *crtc_from_state;
2707
2708         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2709                 crtc_from_state = new_con_state->crtc;
2710
2711                 if (crtc_from_state == crtc)
2712                         return connector;
2713         }
2714
2715         return NULL;
2716 }
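/*
 * Illustrative usage (a sketch only, assuming @state and @crtc belong to
 * the same atomic commit):
 *
 *	connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
 *	if (connector)
 *		... @crtc drives this connector in the new state ...
 */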
2717
2718 static void emulated_link_detect(struct dc_link *link)
2719 {
2720         struct dc_sink_init_data sink_init_data = { 0 };
2721         struct display_sink_capability sink_caps = { 0 };
2722         enum dc_edid_status edid_status;
2723         struct dc_context *dc_ctx = link->ctx;
2724         struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
2725         struct dc_sink *sink = NULL;
2726         struct dc_sink *prev_sink = NULL;
2727
2728         link->type = dc_connection_none;
2729         prev_sink = link->local_sink;
2730
2731         if (prev_sink)
2732                 dc_sink_release(prev_sink);
2733
2734         switch (link->connector_signal) {
2735         case SIGNAL_TYPE_HDMI_TYPE_A: {
2736                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2737                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2738                 break;
2739         }
2740
2741         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2742                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2743                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2744                 break;
2745         }
2746
2747         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2748                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2749                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2750                 break;
2751         }
2752
2753         case SIGNAL_TYPE_LVDS: {
2754                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2755                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2756                 break;
2757         }
2758
2759         case SIGNAL_TYPE_EDP: {
2760                 sink_caps.transaction_type =
2761                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2762                 sink_caps.signal = SIGNAL_TYPE_EDP;
2763                 break;
2764         }
2765
2766         case SIGNAL_TYPE_DISPLAY_PORT: {
2767                 sink_caps.transaction_type =
2768                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2769                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2770                 break;
2771         }
2772
2773         default:
2774                 drm_err(dev, "Invalid connector type! signal:%d\n",
2775                         link->connector_signal);
2776                 return;
2777         }
2778
2779         sink_init_data.link = link;
2780         sink_init_data.sink_signal = sink_caps.signal;
2781
2782         sink = dc_sink_create(&sink_init_data);
2783         if (!sink) {
2784                 drm_err(dev, "Failed to create sink!\n");
2785                 return;
2786         }
2787
2788         /* dc_sink_create returns a new reference */
2789         link->local_sink = sink;
2790
2791         edid_status = dm_helpers_read_local_edid(
2792                         link->ctx,
2793                         link,
2794                         sink);
2795
2796         if (edid_status != EDID_OK)
2797                 drm_err(dev, "Failed to read EDID\n");
2798
2799 }
2800
2801 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2802                                      struct amdgpu_display_manager *dm)
2803 {
2804         struct {
2805                 struct dc_surface_update surface_updates[MAX_SURFACES];
2806                 struct dc_plane_info plane_infos[MAX_SURFACES];
2807                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2808                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2809                 struct dc_stream_update stream_update;
2810         } *bundle;
2811         int k, m;
2812
2813         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2814
2815         if (!bundle) {
2816                 drm_err(dm->ddev, "Failed to allocate update bundle\n");
2817                 goto cleanup;
2818         }
2819
2820         for (k = 0; k < dc_state->stream_count; k++) {
2821                 bundle->stream_update.stream = dc_state->streams[k];
2822
2823                 for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
2824                         bundle->surface_updates[m].surface =
2825                                 dc_state->stream_status[k].plane_states[m];
2826                         bundle->surface_updates[m].surface->force_full_update =
2827                                 true;
2828                 }
2829
2830                 update_planes_and_stream_adapter(dm->dc,
2831                                          UPDATE_TYPE_FULL,
2832                                          dc_state->stream_status[k].plane_count,
2833                                          dc_state->streams[k],
2834                                          &bundle->stream_update,
2835                                          bundle->surface_updates);
2836         }
2837
2838 cleanup:
2839         kfree(bundle);
2840 }
2841
2842 static int dm_resume(void *handle)
2843 {
2844         struct amdgpu_device *adev = handle;
2845         struct drm_device *ddev = adev_to_drm(adev);
2846         struct amdgpu_display_manager *dm = &adev->dm;
2847         struct amdgpu_dm_connector *aconnector;
2848         struct drm_connector *connector;
2849         struct drm_connector_list_iter iter;
2850         struct drm_crtc *crtc;
2851         struct drm_crtc_state *new_crtc_state;
2852         struct dm_crtc_state *dm_new_crtc_state;
2853         struct drm_plane *plane;
2854         struct drm_plane_state *new_plane_state;
2855         struct dm_plane_state *dm_new_plane_state;
2856         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2857         enum dc_connection_type new_connection_type = dc_connection_none;
2858         struct dc_state *dc_state;
2859         int i, r, j, ret;
2860         bool need_hotplug = false;
2861
2862         if (dm->dc->caps.ips_support) {
2863                 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
2864         }
2865
2866         if (amdgpu_in_reset(adev)) {
2867                 dc_state = dm->cached_dc_state;
2868
2869                 /*
2870                  * The dc->current_state is backed up into dm->cached_dc_state
2871                  * before we commit 0 streams.
2872                  *
2873                  * DC will clear link encoder assignments on the real state
2874                  * but the changes won't propagate over to the copy we made
2875                  * before the 0 streams commit.
2876                  *
2877                  * DC expects that link encoder assignments are *not* valid
2878                  * when committing a state, so as a workaround we can copy
2879                  * off of the current state.
2880                  *
2881                  * We lose the previous assignments, but we had already
2882                  * committed 0 streams anyway.
2883                  */
2884                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2885
2886                 r = dm_dmub_hw_init(adev);
2887                 if (r)
2888                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2889
2890                 dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
2891                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2892
2893                 dc_resume(dm->dc);
2894
2895                 amdgpu_dm_irq_resume_early(adev);
2896
2897                 for (i = 0; i < dc_state->stream_count; i++) {
2898                         dc_state->streams[i]->mode_changed = true;
2899                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2900                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2901                                         = 0xffffffff;
2902                         }
2903                 }
2904
2905                 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2906                         amdgpu_dm_outbox_init(adev);
2907                         dc_enable_dmub_outbox(adev->dm.dc);
2908                 }
2909
2910                 WARN_ON(dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count) != DC_OK);
2911
2912                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2913
2914                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2915
2916                 dc_state_release(dm->cached_dc_state);
2917                 dm->cached_dc_state = NULL;
2918
2919                 amdgpu_dm_irq_resume_late(adev);
2920
2921                 mutex_unlock(&dm->dc_lock);
2922
2923                 return 0;
2924         }
2925         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2926         dc_state_release(dm_state->context);
2927         dm_state->context = dc_state_create(dm->dc);
2928         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2929
2930         /* Before powering on DC we need to re-initialize DMUB. */
2931         dm_dmub_hw_resume(adev);
2932
2933         /* Re-enable outbox interrupts for DPIA. */
2934         if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2935                 amdgpu_dm_outbox_init(adev);
2936                 dc_enable_dmub_outbox(adev->dm.dc);
2937         }
2938
2939         /* power on hardware */
2940         dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
2941         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2942
2943         /* program HPD filter */
2944         dc_resume(dm->dc);
2945
2946         /*
2947          * Enable HPD Rx IRQ early; this should be done before setting the mode,
2948          * as short-pulse interrupts are used for MST.
2949          */
2950         amdgpu_dm_irq_resume_early(adev);
2951
2952         /* On resume we need to rewrite the MSTM control bits to enable MST */
2953         s3_handle_mst(ddev, false);
2954
2955         /* Do detection */
2956         drm_connector_list_iter_begin(ddev, &iter);
2957         drm_for_each_connector_iter(connector, &iter) {
2958
2959                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
2960                         continue;
2961
2962                 aconnector = to_amdgpu_dm_connector(connector);
2963
2964                 if (!aconnector->dc_link)
2965                         continue;
2966
2967                 /*
2968                  * Skip already-created end-sink MST connectors while
2969                  * traversing the connector list.
2970                  */
2971                 if (aconnector->mst_root)
2972                         continue;
2973
2974                 mutex_lock(&aconnector->hpd_lock);
2975                 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
2976                         DRM_ERROR("KMS: Failed to detect connector\n");
2977
2978                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2979                         emulated_link_detect(aconnector->dc_link);
2980                 } else {
2981                         mutex_lock(&dm->dc_lock);
2982                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2983                         mutex_unlock(&dm->dc_lock);
2984                 }
2985
2986                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2987                         aconnector->fake_enable = false;
2988
2989                 if (aconnector->dc_sink)
2990                         dc_sink_release(aconnector->dc_sink);
2991                 aconnector->dc_sink = NULL;
2992                 amdgpu_dm_update_connector_after_detect(aconnector);
2993                 mutex_unlock(&aconnector->hpd_lock);
2994         }
2995         drm_connector_list_iter_end(&iter);
2996
2997         /* Force mode set in atomic commit */
2998         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2999                 new_crtc_state->active_changed = true;
3000
3001         /*
3002          * atomic_check is expected to create the dc states. We need to release
3003          * them here, since they were duplicated as part of the suspend
3004          * procedure.
3005          */
3006         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
3007                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
3008                 if (dm_new_crtc_state->stream) {
3009                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
3010                         dc_stream_release(dm_new_crtc_state->stream);
3011                         dm_new_crtc_state->stream = NULL;
3012                 }
3013         }
3014
3015         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
3016                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
3017                 if (dm_new_plane_state->dc_state) {
3018                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
3019                         dc_plane_state_release(dm_new_plane_state->dc_state);
3020                         dm_new_plane_state->dc_state = NULL;
3021                 }
3022         }
3023
3024         drm_atomic_helper_resume(ddev, dm->cached_state);
3025
3026         dm->cached_state = NULL;
3027
3028         /* Do MST topology probing after resuming the cached state */
3029         drm_connector_list_iter_begin(ddev, &iter);
3030         drm_for_each_connector_iter(connector, &iter) {
3031                 aconnector = to_amdgpu_dm_connector(connector);
3032                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
3033                     aconnector->mst_root)
3034                         continue;
3035
3036                 ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
3037
3038                 if (ret < 0) {
3039                         dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
3040                                         aconnector->dc_link);
3041                         need_hotplug = true;
3042                 }
3043         }
3044         drm_connector_list_iter_end(&iter);
3045
3046         if (need_hotplug)
3047                 drm_kms_helper_hotplug_event(ddev);
3048
3049         amdgpu_dm_irq_resume_late(adev);
3050
3051         amdgpu_dm_smu_write_watermarks_table(adev);
3052
3053         return 0;
3054 }
3055
3056 /**
3057  * DOC: DM Lifecycle
3058  *
3059  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
3060  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
3061  * the base driver's device list to be initialized and torn down accordingly.
3062  *
3063  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
3064  */
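/*
 * A simplified sketch of the resulting call flow, as sequenced by the base
 * driver through the hooks below (ordering approximate; the base driver
 * steps all IP blocks through each phase together):
 *
 *	probe:    dm_early_init() -> dm_sw_init() -> dm_hw_init() -> dm_late_init()
 *	suspend:  dm_suspend()
 *	resume:   dm_resume()
 *	teardown: amdgpu_dm_early_fini() -> dm_hw_fini() -> dm_sw_fini()
 */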
3065
3066 static const struct amd_ip_funcs amdgpu_dm_funcs = {
3067         .name = "dm",
3068         .early_init = dm_early_init,
3069         .late_init = dm_late_init,
3070         .sw_init = dm_sw_init,
3071         .sw_fini = dm_sw_fini,
3072         .early_fini = amdgpu_dm_early_fini,
3073         .hw_init = dm_hw_init,
3074         .hw_fini = dm_hw_fini,
3075         .suspend = dm_suspend,
3076         .resume = dm_resume,
3077         .is_idle = dm_is_idle,
3078         .wait_for_idle = dm_wait_for_idle,
3079         .check_soft_reset = dm_check_soft_reset,
3080         .soft_reset = dm_soft_reset,
3081         .set_clockgating_state = dm_set_clockgating_state,
3082         .set_powergating_state = dm_set_powergating_state,
3083 };
3084
3085 const struct amdgpu_ip_block_version dm_ip_block = {
3086         .type = AMD_IP_BLOCK_TYPE_DCE,
3087         .major = 1,
3088         .minor = 0,
3089         .rev = 0,
3090         .funcs = &amdgpu_dm_funcs,
3091 };
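/*
 * Sketch of how the base driver consumes this block (illustration only;
 * the real call sites live in the SoC / IP-discovery setup code):
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 */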
3092
3093
3094 /**
3095  * DOC: atomic
3096  *
3097  * *WIP*
3098  */
3099
3100 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
3101         .fb_create = amdgpu_display_user_framebuffer_create,
3102         .get_format_info = amdgpu_dm_plane_get_format_info,
3103         .atomic_check = amdgpu_dm_atomic_check,
3104         .atomic_commit = drm_atomic_helper_commit,
3105 };
3106
3107 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
3108         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
3109         .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
3110 };
3111
3112 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
3113 {
3114         struct amdgpu_dm_backlight_caps *caps;
3115         struct drm_connector *conn_base;
3116         struct amdgpu_device *adev;
3117         struct drm_luminance_range_info *luminance_range;
3118
3119         if (aconnector->bl_idx == -1 ||
3120             aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
3121                 return;
3122
3123         conn_base = &aconnector->base;
3124         adev = drm_to_adev(conn_base->dev);
3125
3126         caps = &adev->dm.backlight_caps[aconnector->bl_idx];
3127         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
3128         caps->aux_support = false;
3129
3130         if (caps->ext_caps->bits.oled == 1
3131             /*
3132              * ||
3133              * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
3134              * caps->ext_caps->bits.hdr_aux_backlight_control == 1
3135              */)
3136                 caps->aux_support = true;
3137
3138         if (amdgpu_backlight == 0)
3139                 caps->aux_support = false;
3140         else if (amdgpu_backlight == 1)
3141                 caps->aux_support = true;
3142
3143         luminance_range = &conn_base->display_info.luminance_range;
3144
3145         if (luminance_range->max_luminance) {
3146                 caps->aux_min_input_signal = luminance_range->min_luminance;
3147                 caps->aux_max_input_signal = luminance_range->max_luminance;
3148         } else {
3149                 caps->aux_min_input_signal = 0;
3150                 caps->aux_max_input_signal = 512;
3151         }
3152 }
3153
3154 void amdgpu_dm_update_connector_after_detect(
3155                 struct amdgpu_dm_connector *aconnector)
3156 {
3157         struct drm_connector *connector = &aconnector->base;
3158         struct drm_device *dev = connector->dev;
3159         struct dc_sink *sink;
3160
3161         /* MST handled by drm_mst framework */
3162         if (aconnector->mst_mgr.mst_state)
3163                 return;
3164
3165         sink = aconnector->dc_link->local_sink;
3166         if (sink)
3167                 dc_sink_retain(sink);
3168
3169         /*
3170          * An EDID-managed connector gets its first update only in the mode_valid
3171          * hook; the connector sink is then set to fake or physical depending on link status.
3172          * Skip if already done during boot.
3173          */
3174         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3175                         && aconnector->dc_em_sink) {
3176
3177                 /*
3178                  * For headless S3 resume, use dc_em_sink to fake a stream,
3179                  * because connector->sink is set to NULL on resume.
3180                  */
3181                 mutex_lock(&dev->mode_config.mutex);
3182
3183                 if (sink) {
3184                         if (aconnector->dc_sink) {
3185                                 amdgpu_dm_update_freesync_caps(connector, NULL);
3186                                 /*
3187                                  * The retain and release below bump the sink's refcount:
3188                                  * the link no longer points to it after disconnect, so the
3189                                  * next CRTC-to-connector reshuffle by the UMD would
3190                                  * otherwise trigger an unwanted dc_sink release.
3191                                  */
3192                                 dc_sink_release(aconnector->dc_sink);
3193                         }
3194                         aconnector->dc_sink = sink;
3195                         dc_sink_retain(aconnector->dc_sink);
3196                         amdgpu_dm_update_freesync_caps(connector,
3197                                         aconnector->edid);
3198                 } else {
3199                         amdgpu_dm_update_freesync_caps(connector, NULL);
3200                         if (!aconnector->dc_sink) {
3201                                 aconnector->dc_sink = aconnector->dc_em_sink;
3202                                 dc_sink_retain(aconnector->dc_sink);
3203                         }
3204                 }
3205
3206                 mutex_unlock(&dev->mode_config.mutex);
3207
3208                 if (sink)
3209                         dc_sink_release(sink);
3210                 return;
3211         }
3212
3213         /*
3214          * TODO: temporary guard until a proper fix is found.
3215          * If this sink is an MST sink, we should not do anything.
3216          */
3217         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3218                 dc_sink_release(sink);
3219                 return;
3220         }
3221
3222         if (aconnector->dc_sink == sink) {
3223                 /*
3224                  * We got a DP short pulse (Link Loss, DP CTS, etc...).
3225                  * Do nothing!!
3226                  */
3227                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3228                                 aconnector->connector_id);
3229                 if (sink)
3230                         dc_sink_release(sink);
3231                 return;
3232         }
3233
3234         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3235                 aconnector->connector_id, aconnector->dc_sink, sink);
3236
3237         mutex_lock(&dev->mode_config.mutex);
3238
3239         /*
3240          * 1. Update status of the drm connector
3241          * 2. Send an event and let userspace tell us what to do
3242          */
3243         if (sink) {
3244                 /*
3245                  * TODO: check if we still need the S3 mode update workaround.
3246                  * If yes, put it here.
3247                  */
3248                 if (aconnector->dc_sink) {
3249                         amdgpu_dm_update_freesync_caps(connector, NULL);
3250                         dc_sink_release(aconnector->dc_sink);
3251                 }
3252
3253                 aconnector->dc_sink = sink;
3254                 dc_sink_retain(aconnector->dc_sink);
3255                 if (sink->dc_edid.length == 0) {
3256                         aconnector->edid = NULL;
3257                         if (aconnector->dc_link->aux_mode) {
3258                                 drm_dp_cec_unset_edid(
3259                                         &aconnector->dm_dp_aux.aux);
3260                         }
3261                 } else {
3262                         aconnector->edid =
3263                                 (struct edid *)sink->dc_edid.raw_edid;
3264
3265                         if (aconnector->dc_link->aux_mode)
3266                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3267                                                     aconnector->edid);
3268                 }
3269
3270                 if (!aconnector->timing_requested) {
3271                         aconnector->timing_requested =
3272                                 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3273                         if (!aconnector->timing_requested)
3274                                 drm_err(dev,
3275                                         "failed to create aconnector->requested_timing\n");
3276                 }
3277
3278                 drm_connector_update_edid_property(connector, aconnector->edid);
3279                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3280                 update_connector_ext_caps(aconnector);
3281         } else {
3282                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3283                 amdgpu_dm_update_freesync_caps(connector, NULL);
3284                 drm_connector_update_edid_property(connector, NULL);
3285                 aconnector->num_modes = 0;
3286                 dc_sink_release(aconnector->dc_sink);
3287                 aconnector->dc_sink = NULL;
3288                 aconnector->edid = NULL;
3289                 kfree(aconnector->timing_requested);
3290                 aconnector->timing_requested = NULL;
3291                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3292                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3293                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3294         }
3295
3296         mutex_unlock(&dev->mode_config.mutex);
3297
3298         update_subconnector_property(aconnector);
3299
3300         if (sink)
3301                 dc_sink_release(sink);
3302 }
3303
3304 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3305 {
3306         struct drm_connector *connector = &aconnector->base;
3307         struct drm_device *dev = connector->dev;
3308         enum dc_connection_type new_connection_type = dc_connection_none;
3309         struct amdgpu_device *adev = drm_to_adev(dev);
3310         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3311         bool ret = false;
3312
3313         if (adev->dm.disable_hpd_irq)
3314                 return;
3315
3316         /*
3317          * On failure, or for MST, there is no need to update the connector status
3318          * or notify the OS, since (in the MST case) MST does this in its own context.
3319          */
3320         mutex_lock(&aconnector->hpd_lock);
3321
3322         if (adev->dm.hdcp_workqueue) {
3323                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3324                 dm_con_state->update_hdcp = true;
3325         }
3326         if (aconnector->fake_enable)
3327                 aconnector->fake_enable = false;
3328
3329         aconnector->timing_changed = false;
3330
3331         if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3332                 DRM_ERROR("KMS: Failed to detect connector\n");
3333
3334         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3335                 emulated_link_detect(aconnector->dc_link);
3336
3337                 drm_modeset_lock_all(dev);
3338                 dm_restore_drm_connector_state(dev, connector);
3339                 drm_modeset_unlock_all(dev);
3340
3341                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3342                         drm_kms_helper_connector_hotplug_event(connector);
3343         } else {
3344                 mutex_lock(&adev->dm.dc_lock);
3345                 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3346                 mutex_unlock(&adev->dm.dc_lock);
3347                 if (ret) {
3348                         amdgpu_dm_update_connector_after_detect(aconnector);
3349
3350                         drm_modeset_lock_all(dev);
3351                         dm_restore_drm_connector_state(dev, connector);
3352                         drm_modeset_unlock_all(dev);
3353
3354                         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3355                                 drm_kms_helper_connector_hotplug_event(connector);
3356                 }
3357         }
3358         mutex_unlock(&aconnector->hpd_lock);
3359
3360 }
3361
3362 static void handle_hpd_irq(void *param)
3363 {
3364         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3365
3366         handle_hpd_irq_helper(aconnector);
3367
3368 }
3369
3370 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3371                                                         union hpd_irq_data hpd_irq_data)
3372 {
3373         struct hpd_rx_irq_offload_work *offload_work =
3374                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3375
3376         if (!offload_work) {
3377                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3378                 return;
3379         }
3380
3381         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3382         offload_work->data = hpd_irq_data;
3383         offload_work->offload_wq = offload_wq;
3384
3385         queue_work(offload_wq->wq, &offload_work->work);
3386         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3387 }
3388
3389 static void handle_hpd_rx_irq(void *param)
3390 {
3391         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3392         struct drm_connector *connector = &aconnector->base;
3393         struct drm_device *dev = connector->dev;
3394         struct dc_link *dc_link = aconnector->dc_link;
3395         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3396         bool result = false;
3397         enum dc_connection_type new_connection_type = dc_connection_none;
3398         struct amdgpu_device *adev = drm_to_adev(dev);
3399         union hpd_irq_data hpd_irq_data;
3400         bool link_loss = false;
3401         bool has_left_work = false;
3402         int idx = dc_link->link_index;
3403         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3404
3405         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3406
3407         if (adev->dm.disable_hpd_irq)
3408                 return;
3409
3410         /*
3411          * TODO: Temporary mutex to protect the HPD interrupt from GPIO
3412          * conflicts; once an i2c helper is implemented, this mutex should
3413          * be retired.
3414          */
3415         mutex_lock(&aconnector->hpd_lock);
3416
3417         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3418                                                 &link_loss, true, &has_left_work);
3419
3420         if (!has_left_work)
3421                 goto out;
3422
3423         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3424                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3425                 goto out;
3426         }
3427
3428         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3429                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3430                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3431                         bool skip = false;
3432
3433                         /*
3434                          * DOWN_REP_MSG_RDY is also handled by the polling method
3435                          * mgr->cbs->poll_hpd_irq()
3436                          */
3437                         spin_lock(&offload_wq->offload_lock);
3438                         skip = offload_wq->is_handling_mst_msg_rdy_event;
3439
3440                         if (!skip)
3441                                 offload_wq->is_handling_mst_msg_rdy_event = true;
3442
3443                         spin_unlock(&offload_wq->offload_lock);
3444
3445                         if (!skip)
3446                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3447
3448                         goto out;
3449                 }
3450
3451                 if (link_loss) {
3452                         bool skip = false;
3453
3454                         spin_lock(&offload_wq->offload_lock);
3455                         skip = offload_wq->is_handling_link_loss;
3456
3457                         if (!skip)
3458                                 offload_wq->is_handling_link_loss = true;
3459
3460                         spin_unlock(&offload_wq->offload_lock);
3461
3462                         if (!skip)
3463                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3464
3465                         goto out;
3466                 }
3467         }
3468
3469 out:
3470         if (result && !is_mst_root_connector) {
3471                 /* Downstream Port status changed. */
3472                 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
3473                         DRM_ERROR("KMS: Failed to detect connector\n");
3474
3475                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3476                         emulated_link_detect(dc_link);
3477
3478                         if (aconnector->fake_enable)
3479                                 aconnector->fake_enable = false;
3480
3481                         amdgpu_dm_update_connector_after_detect(aconnector);
3482
3483
3484                         drm_modeset_lock_all(dev);
3485                         dm_restore_drm_connector_state(dev, connector);
3486                         drm_modeset_unlock_all(dev);
3487
3488                         drm_kms_helper_connector_hotplug_event(connector);
3489                 } else {
3490                         bool ret = false;
3491
3492                         mutex_lock(&adev->dm.dc_lock);
3493                         ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3494                         mutex_unlock(&adev->dm.dc_lock);
3495
3496                         if (ret) {
3497                                 if (aconnector->fake_enable)
3498                                         aconnector->fake_enable = false;
3499
3500                                 amdgpu_dm_update_connector_after_detect(aconnector);
3501
3502                                 drm_modeset_lock_all(dev);
3503                                 dm_restore_drm_connector_state(dev, connector);
3504                                 drm_modeset_unlock_all(dev);
3505
3506                                 drm_kms_helper_connector_hotplug_event(connector);
3507                         }
3508                 }
3509         }
3510         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3511                 if (adev->dm.hdcp_workqueue)
3512                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3513         }
3514
3515         if (dc_link->type != dc_connection_mst_branch)
3516                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3517
3518         mutex_unlock(&aconnector->hpd_lock);
3519 }
3520
3521 static void register_hpd_handlers(struct amdgpu_device *adev)
3522 {
3523         struct drm_device *dev = adev_to_drm(adev);
3524         struct drm_connector *connector;
3525         struct amdgpu_dm_connector *aconnector;
3526         const struct dc_link *dc_link;
3527         struct dc_interrupt_params int_params = {0};
3528
3529         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3530         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3531
3532         list_for_each_entry(connector,
3533                         &dev->mode_config.connector_list, head) {
3534
3535                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
3536                         continue;
3537
3538                 aconnector = to_amdgpu_dm_connector(connector);
3539                 dc_link = aconnector->dc_link;
3540
3541                 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3542                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3543                         int_params.irq_source = dc_link->irq_source_hpd;
3544
3545                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3546                                         handle_hpd_irq,
3547                                         (void *) aconnector);
3548                 }
3549
3550                 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3551
3552                         /* Also register for DP short pulse (hpd_rx). */
3553                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3554                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3555
3556                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3557                                         handle_hpd_rx_irq,
3558                                         (void *) aconnector);
3559                 }
3560
3561                 if (adev->dm.hpd_rx_offload_wq)
3562                         adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
3563                                 aconnector;
3564         }
3565 }
3566
3567 #if defined(CONFIG_DRM_AMD_DC_SI)
3568 /* Register IRQ sources and initialize IRQ callbacks */
3569 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3570 {
3571         struct dc *dc = adev->dm.dc;
3572         struct common_irq_params *c_irq_params;
3573         struct dc_interrupt_params int_params = {0};
3574         int r;
3575         int i;
3576         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3577
3578         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3579         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3580
3581         /*
3582          * Actions of amdgpu_irq_add_id():
3583          * 1. Register a set() function with base driver.
3584          *    Base driver will call set() function to enable/disable an
3585          *    interrupt in DC hardware.
3586          * 2. Register amdgpu_dm_irq_handler().
3587          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3588          *    coming from DC hardware.
3589          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3590          *    for acknowledging and handling.
3591          */
3592
3593         /* Use VBLANK interrupt */
3594         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3595                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3596                 if (r) {
3597                         DRM_ERROR("Failed to add crtc irq id!\n");
3598                         return r;
3599                 }
3600
3601                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3602                 int_params.irq_source =
3603                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3604
3605                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3606
3607                 c_irq_params->adev = adev;
3608                 c_irq_params->irq_src = int_params.irq_source;
3609
3610                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3611                                 dm_crtc_high_irq, c_irq_params);
3612         }
3613
3614         /* Use GRPH_PFLIP interrupt */
3615         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3616                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3617                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3618                 if (r) {
3619                         DRM_ERROR("Failed to add page flip irq id!\n");
3620                         return r;
3621                 }
3622
3623                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3624                 int_params.irq_source =
3625                         dc_interrupt_to_irq_source(dc, i, 0);
3626
3627                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3628
3629                 c_irq_params->adev = adev;
3630                 c_irq_params->irq_src = int_params.irq_source;
3631
3632                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3633                                 dm_pflip_high_irq, c_irq_params);
3634
3635         }
3636
3637         /* HPD */
3638         r = amdgpu_irq_add_id(adev, client_id,
3639                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3640         if (r) {
3641                 DRM_ERROR("Failed to add hpd irq id!\n");
3642                 return r;
3643         }
3644
3645         register_hpd_handlers(adev);
3646
3647         return 0;
3648 }
3649 #endif
3650
3651 /* Register IRQ sources and initialize IRQ callbacks */
3652 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3653 {
3654         struct dc *dc = adev->dm.dc;
3655         struct common_irq_params *c_irq_params;
3656         struct dc_interrupt_params int_params = {0};
3657         int r;
3658         int i;
3659         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3660
3661         if (adev->family >= AMDGPU_FAMILY_AI)
3662                 client_id = SOC15_IH_CLIENTID_DCE;
3663
3664         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3665         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3666
3667         /*
3668          * Actions of amdgpu_irq_add_id():
3669          * 1. Register a set() function with base driver.
3670          *    Base driver will call set() function to enable/disable an
3671          *    interrupt in DC hardware.
3672          * 2. Register amdgpu_dm_irq_handler().
3673          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3674          *    coming from DC hardware.
3675          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3676          *    for acknowledging and handling.
3677          */
3678
3679         /* Use VBLANK interrupt */
3680         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3681                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3682                 if (r) {
3683                         DRM_ERROR("Failed to add crtc irq id!\n");
3684                         return r;
3685                 }
3686
3687                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3688                 int_params.irq_source =
3689                         dc_interrupt_to_irq_source(dc, i, 0);
3690
3691                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3692
3693                 c_irq_params->adev = adev;
3694                 c_irq_params->irq_src = int_params.irq_source;
3695
3696                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3697                                 dm_crtc_high_irq, c_irq_params);
3698         }
3699
3700         /* Use VUPDATE interrupt */
3701         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3702                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3703                 if (r) {
3704                         DRM_ERROR("Failed to add vupdate irq id!\n");
3705                         return r;
3706                 }
3707
3708                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3709                 int_params.irq_source =
3710                         dc_interrupt_to_irq_source(dc, i, 0);
3711
3712                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3713
3714                 c_irq_params->adev = adev;
3715                 c_irq_params->irq_src = int_params.irq_source;
3716
3717                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3718                                 dm_vupdate_high_irq, c_irq_params);
3719         }
3720
3721         /* Use GRPH_PFLIP interrupt */
3722         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3723                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3724                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3725                 if (r) {
3726                         DRM_ERROR("Failed to add page flip irq id!\n");
3727                         return r;
3728                 }
3729
3730                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3731                 int_params.irq_source =
3732                         dc_interrupt_to_irq_source(dc, i, 0);
3733
3734                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3735
3736                 c_irq_params->adev = adev;
3737                 c_irq_params->irq_src = int_params.irq_source;
3738
3739                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3740                                 dm_pflip_high_irq, c_irq_params);
3741
3742         }
3743
3744         /* HPD */
3745         r = amdgpu_irq_add_id(adev, client_id,
3746                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3747         if (r) {
3748                 DRM_ERROR("Failed to add hpd irq id!\n");
3749                 return r;
3750         }
3751
3752         register_hpd_handlers(adev);
3753
3754         return 0;
3755 }
3756
3757 /* Register IRQ sources and initialize IRQ callbacks */
3758 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3759 {
3760         struct dc *dc = adev->dm.dc;
3761         struct common_irq_params *c_irq_params;
3762         struct dc_interrupt_params int_params = {0};
3763         int r;
3764         int i;
3765 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3766         static const unsigned int vrtl_int_srcid[] = {
3767                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3768                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3769                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3770                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3771                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3772                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3773         };
3774 #endif
3775
3776         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3777         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3778
3779         /*
3780          * Actions of amdgpu_irq_add_id():
3781          * 1. Register a set() function with base driver.
3782          *    Base driver will call set() function to enable/disable an
3783          *    interrupt in DC hardware.
3784          * 2. Register amdgpu_dm_irq_handler().
3785          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3786          *    coming from DC hardware.
3787          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3788          *    for acknowledging and handling.
3789          */
3790
3791         /* Use VSTARTUP interrupt */
3792         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3793                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3794                         i++) {
3795                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3796
3797                 if (r) {
3798                         DRM_ERROR("Failed to add crtc irq id!\n");
3799                         return r;
3800                 }
3801
3802                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3803                 int_params.irq_source =
3804                         dc_interrupt_to_irq_source(dc, i, 0);
3805
3806                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3807
3808                 c_irq_params->adev = adev;
3809                 c_irq_params->irq_src = int_params.irq_source;
3810
3811                 amdgpu_dm_irq_register_interrupt(
3812                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3813         }
3814
3815         /* Use otg vertical line interrupt */
3816 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3817         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3818                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3819                                 vrtl_int_srcid[i], &adev->vline0_irq);
3820
3821                 if (r) {
3822                         DRM_ERROR("Failed to add vline0 irq id!\n");
3823                         return r;
3824                 }
3825
3826                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3827                 int_params.irq_source =
3828                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3829
3830                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3831                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3832                         break;
3833                 }
3834
3835                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3836                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3837
3838                 c_irq_params->adev = adev;
3839                 c_irq_params->irq_src = int_params.irq_source;
3840
3841                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3842                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3843         }
3844 #endif
3845
3846         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3847          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3848          * to trigger at end of each vblank, regardless of state of the lock,
3849          * matching DCE behaviour.
3850          */
3851         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3852              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3853              i++) {
3854                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3855
3856                 if (r) {
3857                         DRM_ERROR("Failed to add vupdate irq id!\n");
3858                         return r;
3859                 }
3860
3861                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3862                 int_params.irq_source =
3863                         dc_interrupt_to_irq_source(dc, i, 0);
3864
3865                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3866
3867                 c_irq_params->adev = adev;
3868                 c_irq_params->irq_src = int_params.irq_source;
3869
3870                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3871                                 dm_vupdate_high_irq, c_irq_params);
3872         }
3873
3874         /* Use GRPH_PFLIP interrupt */
3875         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3876                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3877                         i++) {
3878                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3879                 if (r) {
3880                         DRM_ERROR("Failed to add page flip irq id!\n");
3881                         return r;
3882                 }
3883
3884                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3885                 int_params.irq_source =
3886                         dc_interrupt_to_irq_source(dc, i, 0);
3887
3888                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3889
3890                 c_irq_params->adev = adev;
3891                 c_irq_params->irq_src = int_params.irq_source;
3892
3893                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3894                                 dm_pflip_high_irq, c_irq_params);
3895
3896         }
3897
3898         /* HPD */
3899         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3900                         &adev->hpd_irq);
3901         if (r) {
3902                 DRM_ERROR("Failed to add hpd irq id!\n");
3903                 return r;
3904         }
3905
3906         register_hpd_handlers(adev);
3907
3908         return 0;
3909 }
3910 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3911 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3912 {
3913         struct dc *dc = adev->dm.dc;
3914         struct common_irq_params *c_irq_params;
3915         struct dc_interrupt_params int_params = {0};
3916         int r, i;
3917
3918         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3919         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3920
3921         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3922                         &adev->dmub_outbox_irq);
3923         if (r) {
3924                 DRM_ERROR("Failed to add outbox irq id!\n");
3925                 return r;
3926         }
3927
3928         if (dc->ctx->dmub_srv) {
3929                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3930                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3931                 int_params.irq_source =
3932                 dc_interrupt_to_irq_source(dc, i, 0);
3933
3934                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3935
3936                 c_irq_params->adev = adev;
3937                 c_irq_params->irq_src = int_params.irq_source;
3938
3939                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3940                                 dm_dmub_outbox1_low_irq, c_irq_params);
3941         }
3942
3943         return 0;
3944 }
3945
3946 /*
3947  * Acquires the lock for the atomic state object and returns
3948  * the new atomic state.
3949  *
3950  * This should only be called during atomic check.
3951  */
3952 int dm_atomic_get_state(struct drm_atomic_state *state,
3953                         struct dm_atomic_state **dm_state)
3954 {
3955         struct drm_device *dev = state->dev;
3956         struct amdgpu_device *adev = drm_to_adev(dev);
3957         struct amdgpu_display_manager *dm = &adev->dm;
3958         struct drm_private_state *priv_state;
3959
3960         if (*dm_state)
3961                 return 0;
3962
3963         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3964         if (IS_ERR(priv_state))
3965                 return PTR_ERR(priv_state);
3966
3967         *dm_state = to_dm_atomic_state(priv_state);
3968
3969         return 0;
3970 }
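/*
 * Illustrative usage from an atomic-check path (a sketch only):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	... dm_state->context can now be inspected under the held locks ...
 */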
3971
3972 static struct dm_atomic_state *
3973 dm_atomic_get_new_state(struct drm_atomic_state *state)
3974 {
3975         struct drm_device *dev = state->dev;
3976         struct amdgpu_device *adev = drm_to_adev(dev);
3977         struct amdgpu_display_manager *dm = &adev->dm;
3978         struct drm_private_obj *obj;
3979         struct drm_private_state *new_obj_state;
3980         int i;
3981
3982         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3983                 if (obj->funcs == dm->atomic_obj.funcs)
3984                         return to_dm_atomic_state(new_obj_state);
3985         }
3986
3987         return NULL;
3988 }
3989
3990 static struct drm_private_state *
3991 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3992 {
3993         struct dm_atomic_state *old_state, *new_state;
3994
3995         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3996         if (!new_state)
3997                 return NULL;
3998
3999         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
4000
4001         old_state = to_dm_atomic_state(obj->state);
4002
4003         if (old_state && old_state->context)
4004                 new_state->context = dc_state_create_copy(old_state->context);
4005
4006         if (!new_state->context) {
4007                 kfree(new_state);
4008                 return NULL;
4009         }
4010
4011         return &new_state->base;
4012 }
4013
4014 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
4015                                     struct drm_private_state *state)
4016 {
4017         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4018
4019         if (dm_state && dm_state->context)
4020                 dc_state_release(dm_state->context);
4021
4022         kfree(dm_state);
4023 }
4024
4025 static struct drm_private_state_funcs dm_atomic_state_funcs = {
4026         .atomic_duplicate_state = dm_atomic_duplicate_state,
4027         .atomic_destroy_state = dm_atomic_destroy_state,
4028 };
4029
4030 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
4031 {
4032         struct dm_atomic_state *state;
4033         int r;
4034
4035         adev->mode_info.mode_config_initialized = true;
4036
4037         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
4038         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
4039
4040         adev_to_drm(adev)->mode_config.max_width = 16384;
4041         adev_to_drm(adev)->mode_config.max_height = 16384;
4042
4043         adev_to_drm(adev)->mode_config.preferred_depth = 24;
4044         if (adev->asic_type == CHIP_HAWAII)
4045                 /* disable prefer shadow for now due to hibernation issues */
4046                 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
4047         else
4048                 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
4049         /* indicates support for immediate flip */
4050         adev_to_drm(adev)->mode_config.async_page_flip = true;
4051
4052         state = kzalloc(sizeof(*state), GFP_KERNEL);
4053         if (!state)
4054                 return -ENOMEM;
4055
4056         state->context = dc_state_create_current_copy(adev->dm.dc);
4057         if (!state->context) {
4058                 kfree(state);
4059                 return -ENOMEM;
4060         }
4061
4062         drm_atomic_private_obj_init(adev_to_drm(adev),
4063                                     &adev->dm.atomic_obj,
4064                                     &state->base,
4065                                     &dm_atomic_state_funcs);
4066
4067         r = amdgpu_display_modeset_create_props(adev);
4068         if (r) {
4069                 dc_state_release(state->context);
4070                 kfree(state);
4071                 return r;
4072         }
4073
#ifdef AMD_PRIVATE_COLOR
        if (amdgpu_dm_create_color_properties(adev)) {
                dc_state_release(state->context);
                kfree(state);
                return -ENOMEM;
        }
#endif
4078
4079         r = amdgpu_dm_audio_init(adev);
4080         if (r) {
4081                 dc_state_release(state->context);
4082                 kfree(state);
4083                 return r;
4084         }
4085
4086         return 0;
4087 }
4088
4089 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
4090 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
4091 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
4092
4093 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4094                                             int bl_idx)
4095 {
4096 #if defined(CONFIG_ACPI)
4097         struct amdgpu_dm_backlight_caps caps;
4098
4099         memset(&caps, 0, sizeof(caps));
4100
4101         if (dm->backlight_caps[bl_idx].caps_valid)
4102                 return;
4103
4104         amdgpu_acpi_get_backlight_caps(&caps);
4105         if (caps.caps_valid) {
4106                 dm->backlight_caps[bl_idx].caps_valid = true;
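                /*
                 * AUX-driven backlights are controlled in nits (see
                 * get_brightness_range()), so the PWM input_signal limits
                 * are left unset for them.
                 */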
4107                 if (caps.aux_support)
4108                         return;
4109                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
4110                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
4111         } else {
4112                 dm->backlight_caps[bl_idx].min_input_signal =
4113                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4114                 dm->backlight_caps[bl_idx].max_input_signal =
4115                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4116         }
4117 #else
4118         if (dm->backlight_caps[bl_idx].aux_support)
4119                 return;
4120
4121         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4122         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4123 #endif
4124 }
4125
4126 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
4127                                 unsigned int *min, unsigned int *max)
4128 {
4129         if (!caps)
4130                 return 0;
4131
4132         if (caps->aux_support) {
4133                 // Firmware limits are in nits, DC API wants millinits.
4134                 *max = 1000 * caps->aux_max_input_signal;
4135                 *min = 1000 * caps->aux_min_input_signal;
4136         } else {
4137                 // Firmware limits are 8-bit, PWM control is 16-bit.
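                // Multiplying by 0x101 replicates the 8-bit value into both
                // bytes (e.g. 0xAB -> 0xABAB), so 0xFF maps exactly to 0xFFFF.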
4138                 *max = 0x101 * caps->max_input_signal;
4139                 *min = 0x101 * caps->min_input_signal;
4140         }
4141         return 1;
4142 }
4143
4144 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4145                                         uint32_t brightness)
4146 {
4147         unsigned int min, max;
4148
4149         if (!get_brightness_range(caps, &min, &max))
4150                 return brightness;
4151
4152         // Rescale 0..255 to min..max
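        // e.g. with the default PWM caps of 12..255 this gives min = 0xC0C
        // and max = 0xFFFF, so user level 255 maps to 0xFFFF and 0 to 0xC0C.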
4153         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4154                                        AMDGPU_MAX_BL_LEVEL);
4155 }
4156
4157 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4158                                       uint32_t brightness)
4159 {
4160         unsigned int min, max;
4161
4162         if (!get_brightness_range(caps, &min, &max))
4163                 return brightness;
4164
4165         if (brightness < min)
4166                 return 0;
4167         // Rescale min..max to 0..255
4168         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4169                                  max - min);
4170 }
4171
4172 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4173                                          int bl_idx,
4174                                          u32 user_brightness)
4175 {
4176         struct amdgpu_dm_backlight_caps caps;
4177         struct dc_link *link;
4178         u32 brightness;
4179         bool rc;
4180
4181         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4182         caps = dm->backlight_caps[bl_idx];
4183
4184         dm->brightness[bl_idx] = user_brightness;
4185         /* update scratch register */
4186         if (bl_idx == 0)
4187                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4188         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4189         link = (struct dc_link *)dm->backlight_link[bl_idx];
4190
4191         /* Change brightness based on AUX property */
4192         if (caps.aux_support) {
4193                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4194                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4195                 if (!rc)
4196                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4197         } else {
4198                 rc = dc_link_set_backlight_level(link, brightness, 0);
4199                 if (!rc)
4200                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4201         }
4202
4203         if (rc)
4204                 dm->actual_brightness[bl_idx] = user_brightness;
4205 }
4206
4207 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4208 {
4209         struct amdgpu_display_manager *dm = bl_get_data(bd);
4210         int i;
4211
4212         for (i = 0; i < dm->num_of_edps; i++) {
4213                 if (bd == dm->backlight_dev[i])
4214                         break;
4215         }
        /* Fall back to the first eDP if this device isn't tracked. */
        if (i >= dm->num_of_edps)
                i = 0;
4218         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4219
4220         return 0;
4221 }
4222
4223 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4224                                          int bl_idx)
4225 {
4226         int ret;
4227         struct amdgpu_dm_backlight_caps caps;
4228         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4229
4230         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4231         caps = dm->backlight_caps[bl_idx];
4232
4233         if (caps.aux_support) {
4234                 u32 avg, peak;
4235                 bool rc;
4236
4237                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4238                 if (!rc)
4239                         return dm->brightness[bl_idx];
4240                 return convert_brightness_to_user(&caps, avg);
4241         }
4242
4243         ret = dc_link_get_backlight_level(link);
4244
4245         if (ret == DC_ERROR_UNEXPECTED)
4246                 return dm->brightness[bl_idx];
4247
4248         return convert_brightness_to_user(&caps, ret);
4249 }
4250
4251 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4252 {
4253         struct amdgpu_display_manager *dm = bl_get_data(bd);
4254         int i;
4255
4256         for (i = 0; i < dm->num_of_edps; i++) {
4257                 if (bd == dm->backlight_dev[i])
4258                         break;
4259         }
        /* Fall back to the first eDP if this device isn't tracked. */
        if (i >= dm->num_of_edps)
                i = 0;
4262         return amdgpu_dm_backlight_get_level(dm, i);
4263 }
4264
4265 static const struct backlight_ops amdgpu_dm_backlight_ops = {
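        /* BL_CORE_SUSPENDRESUME asks the backlight core to blank/unblank the
         * device across suspend/resume.
         */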
4266         .options = BL_CORE_SUSPENDRESUME,
4267         .get_brightness = amdgpu_dm_backlight_get_brightness,
4268         .update_status  = amdgpu_dm_backlight_update_status,
4269 };
4270
4271 static void
4272 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
4273 {
4274         struct drm_device *drm = aconnector->base.dev;
4275         struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
4276         struct backlight_properties props = { 0 };
4277         char bl_name[16];
4278
4279         if (aconnector->bl_idx == -1)
4280                 return;
4281
4282         if (!acpi_video_backlight_use_native()) {
4283                 drm_info(drm, "Skipping amdgpu DM backlight registration\n");
4284                 /* Try registering an ACPI video backlight device instead. */
4285                 acpi_video_register_backlight();
4286                 return;
4287         }
4288
4289         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4290         props.brightness = AMDGPU_MAX_BL_LEVEL;
4291         props.type = BACKLIGHT_RAW;
4292
4293         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4294                  drm->primary->index + aconnector->bl_idx);
4295
4296         dm->backlight_dev[aconnector->bl_idx] =
4297                 backlight_device_register(bl_name, aconnector->base.kdev, dm,
4298                                           &amdgpu_dm_backlight_ops, &props);
4299
        if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
                DRM_ERROR("DM: Backlight registration failed!\n");
                dm->backlight_dev[aconnector->bl_idx] = NULL;
        } else {
                DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
        }
4305 }
4306
4307 static int initialize_plane(struct amdgpu_display_manager *dm,
4308                             struct amdgpu_mode_info *mode_info, int plane_id,
4309                             enum drm_plane_type plane_type,
4310                             const struct dc_plane_cap *plane_cap)
4311 {
4312         struct drm_plane *plane;
4313         unsigned long possible_crtcs;
4314         int ret = 0;
4315
        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
4317         if (!plane) {
4318                 DRM_ERROR("KMS: Failed to allocate plane\n");
4319                 return -ENOMEM;
4320         }
4321         plane->type = plane_type;
4322
        /*
         * HACK: IGT tests expect that the primary plane for a CRTC
         * can only have one possible CRTC. Only expose support for
         * any CRTC if the plane is not going to be used as a primary
         * plane for a CRTC - i.e. for overlay or underlay planes.
         */
4329         possible_crtcs = 1 << plane_id;
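        /* Planes beyond the primaries may be bound to any CRTC (0xff = all). */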
4330         if (plane_id >= dm->dc->caps.max_streams)
4331                 possible_crtcs = 0xff;
4332
4333         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4334
4335         if (ret) {
4336                 DRM_ERROR("KMS: Failed to initialize plane\n");
4337                 kfree(plane);
4338                 return ret;
4339         }
4340
4341         if (mode_info)
4342                 mode_info->planes[plane_id] = plane;
4343
4344         return ret;
4345 }
4346
4347
4348 static void setup_backlight_device(struct amdgpu_display_manager *dm,
4349                                    struct amdgpu_dm_connector *aconnector)
4350 {
4351         struct dc_link *link = aconnector->dc_link;
4352         int bl_idx = dm->num_of_edps;
4353
4354         if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
4355             link->type == dc_connection_none)
4356                 return;
4357
4358         if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
                drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
4360                 return;
4361         }
4362
4363         aconnector->bl_idx = bl_idx;
4364
4365         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4366         dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
4367         dm->backlight_link[bl_idx] = link;
4368         dm->num_of_edps++;
4369
4370         update_connector_ext_caps(aconnector);
4371 }
4372
4373 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4374
/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
4383 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4384 {
4385         struct amdgpu_display_manager *dm = &adev->dm;
4386         s32 i;
4387         struct amdgpu_dm_connector *aconnector = NULL;
4388         struct amdgpu_encoder *aencoder = NULL;
4389         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4390         u32 link_cnt;
4391         s32 primary_planes;
4392         enum dc_connection_type new_connection_type = dc_connection_none;
4393         const struct dc_plane_cap *plane;
4394         bool psr_feature_enabled = false;
4395         int max_overlay = dm->dc->caps.max_slave_planes;
4396
4397         dm->display_indexes_num = dm->dc->caps.max_streams;
        /* Update the actual number of CRTCs in use */
4399         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4400
4401         amdgpu_dm_set_irq_funcs(adev);
4402
4403         link_cnt = dm->dc->caps.max_links;
4404         if (amdgpu_dm_mode_config_init(dm->adev)) {
4405                 DRM_ERROR("DM: Failed to initialize mode config\n");
4406                 return -EINVAL;
4407         }
4408
4409         /* There is one primary plane per CRTC */
4410         primary_planes = dm->dc->caps.max_streams;
4411         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4412
        /*
         * Initialize primary planes, implicit planes for legacy IOCTLs.
         * Order is reversed to match iteration order in atomic check.
         */
4417         for (i = (primary_planes - 1); i >= 0; i--) {
4418                 plane = &dm->dc->caps.planes[i];
4419
4420                 if (initialize_plane(dm, mode_info, i,
4421                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4422                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4423                         goto fail;
4424                 }
4425         }
4426
4427         /*
4428          * Initialize overlay planes, index starting after primary planes.
4429          * These planes have a higher DRM index than the primary planes since
4430          * they should be considered as having a higher z-order.
4431          * Order is reversed to match iteration order in atomic check.
4432          *
4433          * Only support DCN for now, and only expose one so we don't encourage
4434          * userspace to use up all the pipes.
4435          */
4436         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4437                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4438
4439                 /* Do not create overlay if MPO disabled */
4440                 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4441                         break;
4442
4443                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4444                         continue;
4445
4446                 if (!plane->pixel_format_support.argb8888)
4447                         continue;
4448
4449                 if (max_overlay-- == 0)
4450                         break;
4451
4452                 if (initialize_plane(dm, NULL, primary_planes + i,
4453                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4454                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4455                         goto fail;
4456                 }
4457         }
4458
4459         for (i = 0; i < dm->dc->caps.max_streams; i++)
4460                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4461                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4462                         goto fail;
4463                 }
4464
4465         /* Use Outbox interrupt */
4466         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 1, 2):
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 4):
        case IP_VERSION(3, 1, 5):
        case IP_VERSION(3, 1, 6):
        case IP_VERSION(3, 2, 0):
        case IP_VERSION(3, 2, 1):
        case IP_VERSION(3, 5, 0):
4477                 if (register_outbox_irq_handlers(dm->adev)) {
4478                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4479                         goto fail;
4480                 }
4481                 break;
4482         default:
4483                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4484                               amdgpu_ip_version(adev, DCE_HWIP, 0));
4485         }
4486
4487         /* Determine whether to enable PSR support by default. */
4488         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4489                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4490                 case IP_VERSION(3, 1, 2):
4491                 case IP_VERSION(3, 1, 3):
4492                 case IP_VERSION(3, 1, 4):
4493                 case IP_VERSION(3, 1, 5):
4494                 case IP_VERSION(3, 1, 6):
4495                 case IP_VERSION(3, 2, 0):
4496                 case IP_VERSION(3, 2, 1):
4497                 case IP_VERSION(3, 5, 0):
4498                         psr_feature_enabled = true;
4499                         break;
4500                 default:
4501                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4502                         break;
4503                 }
4504         }
4505
4506         /* loops over all connectors on the board */
4507         for (i = 0; i < link_cnt; i++) {
4508                 struct dc_link *link = NULL;
4509
4510                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4511                         DRM_ERROR(
4512                                 "KMS: Cannot support more than %d display indexes\n",
4513                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4514                         continue;
4515                 }
4516
4517                 link = dc_get_link_at_index(dm->dc, i);
4518
4519                 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
4520                         struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
4521
4522                         if (!wbcon) {
4523                                 DRM_ERROR("KMS: Failed to allocate writeback connector\n");
4524                                 continue;
4525                         }
4526
4527                         if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
4528                                 DRM_ERROR("KMS: Failed to initialize writeback connector\n");
4529                                 kfree(wbcon);
4530                                 continue;
4531                         }
4532
4533                         link->psr_settings.psr_feature_enabled = false;
4534                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
4535
4536                         continue;
4537                 }
4538
4539                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4540                 if (!aconnector)
4541                         goto fail;
4542
4543                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4544                 if (!aencoder)
4545                         goto fail;
4546
4547                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4548                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4549                         goto fail;
4550                 }
4551
4552                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4553                         DRM_ERROR("KMS: Failed to initialize connector\n");
4554                         goto fail;
4555                 }
4556
4557                 if (!dc_link_detect_connection_type(link, &new_connection_type))
4558                         DRM_ERROR("KMS: Failed to detect connector\n");
4559
4560                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4561                         emulated_link_detect(link);
4562                         amdgpu_dm_update_connector_after_detect(aconnector);
4563                 } else {
4564                         bool ret = false;
4565
4566                         mutex_lock(&dm->dc_lock);
4567                         ret = dc_link_detect(link, DETECT_REASON_BOOT);
4568                         mutex_unlock(&dm->dc_lock);
4569
4570                         if (ret) {
4571                                 amdgpu_dm_update_connector_after_detect(aconnector);
4572                                 setup_backlight_device(dm, aconnector);
4573
4574                                 if (psr_feature_enabled)
4575                                         amdgpu_dm_set_psr_caps(link);
4576
4577                                 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4578                                  * PSR is also supported.
4579                                  */
4580                                 if (link->psr_settings.psr_feature_enabled)
4581                                         adev_to_drm(adev)->vblank_disable_immediate = false;
4582                         }
4583                 }
4584                 amdgpu_set_panel_orientation(&aconnector->base);
4585         }
4586
4587         /* Software is initialized. Now we can register interrupt handlers. */
4588         switch (adev->asic_type) {
4589 #if defined(CONFIG_DRM_AMD_DC_SI)
4590         case CHIP_TAHITI:
4591         case CHIP_PITCAIRN:
4592         case CHIP_VERDE:
4593         case CHIP_OLAND:
4594                 if (dce60_register_irq_handlers(dm->adev)) {
4595                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4596                         goto fail;
4597                 }
4598                 break;
4599 #endif
4600         case CHIP_BONAIRE:
4601         case CHIP_HAWAII:
4602         case CHIP_KAVERI:
4603         case CHIP_KABINI:
4604         case CHIP_MULLINS:
4605         case CHIP_TONGA:
4606         case CHIP_FIJI:
4607         case CHIP_CARRIZO:
4608         case CHIP_STONEY:
4609         case CHIP_POLARIS11:
4610         case CHIP_POLARIS10:
4611         case CHIP_POLARIS12:
4612         case CHIP_VEGAM:
4613         case CHIP_VEGA10:
4614         case CHIP_VEGA12:
4615         case CHIP_VEGA20:
4616                 if (dce110_register_irq_handlers(dm->adev)) {
4617                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4618                         goto fail;
4619                 }
4620                 break;
4621         default:
4622                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4623                 case IP_VERSION(1, 0, 0):
4624                 case IP_VERSION(1, 0, 1):
4625                 case IP_VERSION(2, 0, 2):
4626                 case IP_VERSION(2, 0, 3):
4627                 case IP_VERSION(2, 0, 0):
4628                 case IP_VERSION(2, 1, 0):
4629                 case IP_VERSION(3, 0, 0):
4630                 case IP_VERSION(3, 0, 2):
4631                 case IP_VERSION(3, 0, 3):
4632                 case IP_VERSION(3, 0, 1):
4633                 case IP_VERSION(3, 1, 2):
4634                 case IP_VERSION(3, 1, 3):
4635                 case IP_VERSION(3, 1, 4):
4636                 case IP_VERSION(3, 1, 5):
4637                 case IP_VERSION(3, 1, 6):
4638                 case IP_VERSION(3, 2, 0):
4639                 case IP_VERSION(3, 2, 1):
4640                 case IP_VERSION(3, 5, 0):
4641                         if (dcn10_register_irq_handlers(dm->adev)) {
4642                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4643                                 goto fail;
4644                         }
4645                         break;
4646                 default:
                        DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4648                                         amdgpu_ip_version(adev, DCE_HWIP, 0));
4649                         goto fail;
4650                 }
4651                 break;
4652         }
4653
4654         return 0;
4655 fail:
4656         kfree(aencoder);
4657         kfree(aconnector);
4658
4659         return -EINVAL;
4660 }
4661
4662 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4663 {
4664         drm_atomic_private_obj_fini(&dm->atomic_obj);
4665 }
4666
4667 /******************************************************************************
4668  * amdgpu_display_funcs functions
4669  *****************************************************************************/
4670
4671 /*
4672  * dm_bandwidth_update - program display watermarks
4673  *
4674  * @adev: amdgpu_device pointer
4675  *
4676  * Calculate and program the display watermarks and line buffer allocation.
4677  */
4678 static void dm_bandwidth_update(struct amdgpu_device *adev)
4679 {
4680         /* TODO: implement later */
4681 }
4682
4683 static const struct amdgpu_display_funcs dm_display_funcs = {
4684         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4685         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4686         .backlight_set_level = NULL, /* never called for DC */
4687         .backlight_get_level = NULL, /* never called for DC */
4688         .hpd_sense = NULL,/* called unconditionally */
4689         .hpd_set_polarity = NULL, /* called unconditionally */
4690         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4691         .page_flip_get_scanoutpos =
4692                 dm_crtc_get_scanoutpos,/* called unconditionally */
4693         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4694         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4695 };
4696
4697 #if defined(CONFIG_DEBUG_KERNEL_DC)
4698
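/*
 * Debug-only sysfs attribute: writing a non-zero value simulates an S3
 * resume (dm_resume() plus a hotplug event), writing 0 simulates a suspend.
 */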
4699 static ssize_t s3_debug_store(struct device *device,
4700                               struct device_attribute *attr,
4701                               const char *buf,
4702                               size_t count)
4703 {
4704         int ret;
4705         int s3_state;
4706         struct drm_device *drm_dev = dev_get_drvdata(device);
4707         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4708
4709         ret = kstrtoint(buf, 0, &s3_state);
4710
4711         if (ret == 0) {
4712                 if (s3_state) {
4713                         dm_resume(adev);
4714                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
                } else {
                        dm_suspend(adev);
                }
4717         }
4718
4719         return ret == 0 ? count : 0;
4720 }
4721
4722 DEVICE_ATTR_WO(s3_debug);
4723
4724 #endif
4725
4726 static int dm_init_microcode(struct amdgpu_device *adev)
4727 {
        const char *fw_name_dmub;
4730
4731         switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4732         case IP_VERSION(2, 1, 0):
4733                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
4734                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
4735                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
4736                 break;
4737         case IP_VERSION(3, 0, 0):
4738                 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
4739                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
4740                 else
4741                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
4742                 break;
4743         case IP_VERSION(3, 0, 1):
4744                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
4745                 break;
4746         case IP_VERSION(3, 0, 2):
4747                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
4748                 break;
4749         case IP_VERSION(3, 0, 3):
4750                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
4751                 break;
4752         case IP_VERSION(3, 1, 2):
4753         case IP_VERSION(3, 1, 3):
4754                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
4755                 break;
4756         case IP_VERSION(3, 1, 4):
4757                 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
4758                 break;
4759         case IP_VERSION(3, 1, 5):
4760                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
4761                 break;
4762         case IP_VERSION(3, 1, 6):
4763                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
4764                 break;
4765         case IP_VERSION(3, 2, 0):
4766                 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
4767                 break;
4768         case IP_VERSION(3, 2, 1):
4769                 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
4770                 break;
4771         case IP_VERSION(3, 5, 0):
4772                 fw_name_dmub = FIRMWARE_DCN_35_DMUB;
4773                 break;
4774         default:
4775                 /* ASIC doesn't support DMUB. */
4776                 return 0;
4777         }
        return amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
4780 }
4781
4782 static int dm_early_init(void *handle)
4783 {
4784         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4785         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4786         struct atom_context *ctx = mode_info->atom_context;
4787         int index = GetIndexIntoMasterTable(DATA, Object_Header);
4788         u16 data_offset;
4789
4790         /* if there is no object header, skip DM */
4791         if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4792                 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4793                 dev_info(adev->dev, "No object header, skipping DM\n");
4794                 return -ENOENT;
4795         }
4796
4797         switch (adev->asic_type) {
4798 #if defined(CONFIG_DRM_AMD_DC_SI)
4799         case CHIP_TAHITI:
4800         case CHIP_PITCAIRN:
4801         case CHIP_VERDE:
4802                 adev->mode_info.num_crtc = 6;
4803                 adev->mode_info.num_hpd = 6;
4804                 adev->mode_info.num_dig = 6;
4805                 break;
4806         case CHIP_OLAND:
4807                 adev->mode_info.num_crtc = 2;
4808                 adev->mode_info.num_hpd = 2;
4809                 adev->mode_info.num_dig = 2;
4810                 break;
4811 #endif
4812         case CHIP_BONAIRE:
4813         case CHIP_HAWAII:
4814                 adev->mode_info.num_crtc = 6;
4815                 adev->mode_info.num_hpd = 6;
4816                 adev->mode_info.num_dig = 6;
4817                 break;
4818         case CHIP_KAVERI:
4819                 adev->mode_info.num_crtc = 4;
4820                 adev->mode_info.num_hpd = 6;
4821                 adev->mode_info.num_dig = 7;
4822                 break;
4823         case CHIP_KABINI:
4824         case CHIP_MULLINS:
4825                 adev->mode_info.num_crtc = 2;
4826                 adev->mode_info.num_hpd = 6;
4827                 adev->mode_info.num_dig = 6;
4828                 break;
4829         case CHIP_FIJI:
4830         case CHIP_TONGA:
4831                 adev->mode_info.num_crtc = 6;
4832                 adev->mode_info.num_hpd = 6;
4833                 adev->mode_info.num_dig = 7;
4834                 break;
4835         case CHIP_CARRIZO:
4836                 adev->mode_info.num_crtc = 3;
4837                 adev->mode_info.num_hpd = 6;
4838                 adev->mode_info.num_dig = 9;
4839                 break;
4840         case CHIP_STONEY:
4841                 adev->mode_info.num_crtc = 2;
4842                 adev->mode_info.num_hpd = 6;
4843                 adev->mode_info.num_dig = 9;
4844                 break;
4845         case CHIP_POLARIS11:
4846         case CHIP_POLARIS12:
4847                 adev->mode_info.num_crtc = 5;
4848                 adev->mode_info.num_hpd = 5;
4849                 adev->mode_info.num_dig = 5;
4850                 break;
4851         case CHIP_POLARIS10:
4852         case CHIP_VEGAM:
4853                 adev->mode_info.num_crtc = 6;
4854                 adev->mode_info.num_hpd = 6;
4855                 adev->mode_info.num_dig = 6;
4856                 break;
4857         case CHIP_VEGA10:
4858         case CHIP_VEGA12:
4859         case CHIP_VEGA20:
4860                 adev->mode_info.num_crtc = 6;
4861                 adev->mode_info.num_hpd = 6;
4862                 adev->mode_info.num_dig = 6;
4863                 break;
4864         default:
4865
4866                 switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
4867                 case IP_VERSION(2, 0, 2):
4868                 case IP_VERSION(3, 0, 0):
4869                         adev->mode_info.num_crtc = 6;
4870                         adev->mode_info.num_hpd = 6;
4871                         adev->mode_info.num_dig = 6;
4872                         break;
4873                 case IP_VERSION(2, 0, 0):
4874                 case IP_VERSION(3, 0, 2):
4875                         adev->mode_info.num_crtc = 5;
4876                         adev->mode_info.num_hpd = 5;
4877                         adev->mode_info.num_dig = 5;
4878                         break;
4879                 case IP_VERSION(2, 0, 3):
4880                 case IP_VERSION(3, 0, 3):
4881                         adev->mode_info.num_crtc = 2;
4882                         adev->mode_info.num_hpd = 2;
4883                         adev->mode_info.num_dig = 2;
4884                         break;
4885                 case IP_VERSION(1, 0, 0):
4886                 case IP_VERSION(1, 0, 1):
4887                 case IP_VERSION(3, 0, 1):
4888                 case IP_VERSION(2, 1, 0):
4889                 case IP_VERSION(3, 1, 2):
4890                 case IP_VERSION(3, 1, 3):
4891                 case IP_VERSION(3, 1, 4):
4892                 case IP_VERSION(3, 1, 5):
4893                 case IP_VERSION(3, 1, 6):
4894                 case IP_VERSION(3, 2, 0):
4895                 case IP_VERSION(3, 2, 1):
4896                 case IP_VERSION(3, 5, 0):
4897                         adev->mode_info.num_crtc = 4;
4898                         adev->mode_info.num_hpd = 4;
4899                         adev->mode_info.num_dig = 4;
4900                         break;
4901                 default:
                        DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4903                                         amdgpu_ip_version(adev, DCE_HWIP, 0));
4904                         return -EINVAL;
4905                 }
4906                 break;
4907         }
4908
4909         if (adev->mode_info.funcs == NULL)
4910                 adev->mode_info.funcs = &dm_display_funcs;
4911
4912         /*
4913          * Note: Do NOT change adev->audio_endpt_rreg and
4914          * adev->audio_endpt_wreg because they are initialised in
4915          * amdgpu_device_init()
4916          */
4917 #if defined(CONFIG_DEBUG_KERNEL_DC)
4918         device_create_file(
4919                 adev_to_drm(adev)->dev,
4920                 &dev_attr_s3_debug);
4921 #endif
4922         adev->dc_enabled = true;
4923
4924         return dm_init_microcode(adev);
4925 }
4926
4927 static bool modereset_required(struct drm_crtc_state *crtc_state)
4928 {
4929         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4930 }
4931
4932 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4933 {
4934         drm_encoder_cleanup(encoder);
4935         kfree(encoder);
4936 }
4937
4938 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4939         .destroy = amdgpu_dm_encoder_destroy,
4940 };
4941
4942 static int
4943 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4944                             const enum surface_pixel_format format,
4945                             enum dc_color_space *color_space)
4946 {
4947         bool full_range;
4948
4949         *color_space = COLOR_SPACE_SRGB;
4950
4951         /* DRM color properties only affect non-RGB formats. */
4952         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4953                 return 0;
4954
4955         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4956
4957         switch (plane_state->color_encoding) {
4958         case DRM_COLOR_YCBCR_BT601:
4959                 if (full_range)
4960                         *color_space = COLOR_SPACE_YCBCR601;
4961                 else
4962                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4963                 break;
4964
4965         case DRM_COLOR_YCBCR_BT709:
4966                 if (full_range)
4967                         *color_space = COLOR_SPACE_YCBCR709;
4968                 else
4969                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4970                 break;
4971
4972         case DRM_COLOR_YCBCR_BT2020:
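                /* DC only supports full-range BT.2020 YCbCr here. */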
4973                 if (full_range)
4974                         *color_space = COLOR_SPACE_2020_YCBCR;
4975                 else
4976                         return -EINVAL;
4977                 break;
4978
4979         default:
4980                 return -EINVAL;
4981         }
4982
4983         return 0;
4984 }
4985
4986 static int
4987 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4988                             const struct drm_plane_state *plane_state,
4989                             const u64 tiling_flags,
4990                             struct dc_plane_info *plane_info,
4991                             struct dc_plane_address *address,
4992                             bool tmz_surface,
4993                             bool force_disable_dcc)
4994 {
4995         const struct drm_framebuffer *fb = plane_state->fb;
4996         const struct amdgpu_framebuffer *afb =
4997                 to_amdgpu_framebuffer(plane_state->fb);
4998         int ret;
4999
5000         memset(plane_info, 0, sizeof(*plane_info));
5001
5002         switch (fb->format->format) {
5003         case DRM_FORMAT_C8:
5004                 plane_info->format =
5005                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5006                 break;
5007         case DRM_FORMAT_RGB565:
5008                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5009                 break;
5010         case DRM_FORMAT_XRGB8888:
5011         case DRM_FORMAT_ARGB8888:
5012                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5013                 break;
5014         case DRM_FORMAT_XRGB2101010:
5015         case DRM_FORMAT_ARGB2101010:
5016                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5017                 break;
5018         case DRM_FORMAT_XBGR2101010:
5019         case DRM_FORMAT_ABGR2101010:
5020                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5021                 break;
5022         case DRM_FORMAT_XBGR8888:
5023         case DRM_FORMAT_ABGR8888:
5024                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5025                 break;
5026         case DRM_FORMAT_NV21:
5027                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5028                 break;
5029         case DRM_FORMAT_NV12:
5030                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5031                 break;
5032         case DRM_FORMAT_P010:
5033                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5034                 break;
5035         case DRM_FORMAT_XRGB16161616F:
5036         case DRM_FORMAT_ARGB16161616F:
5037                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5038                 break;
5039         case DRM_FORMAT_XBGR16161616F:
5040         case DRM_FORMAT_ABGR16161616F:
5041                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5042                 break;
5043         case DRM_FORMAT_XRGB16161616:
5044         case DRM_FORMAT_ARGB16161616:
5045                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5046                 break;
5047         case DRM_FORMAT_XBGR16161616:
5048         case DRM_FORMAT_ABGR16161616:
5049                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5050                 break;
5051         default:
5052                 DRM_ERROR(
5053                         "Unsupported screen format %p4cc\n",
5054                         &fb->format->format);
5055                 return -EINVAL;
5056         }
5057
5058         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5059         case DRM_MODE_ROTATE_0:
5060                 plane_info->rotation = ROTATION_ANGLE_0;
5061                 break;
5062         case DRM_MODE_ROTATE_90:
5063                 plane_info->rotation = ROTATION_ANGLE_90;
5064                 break;
5065         case DRM_MODE_ROTATE_180:
5066                 plane_info->rotation = ROTATION_ANGLE_180;
5067                 break;
5068         case DRM_MODE_ROTATE_270:
5069                 plane_info->rotation = ROTATION_ANGLE_270;
5070                 break;
5071         default:
5072                 plane_info->rotation = ROTATION_ANGLE_0;
5073                 break;
5074         }
5075
5076
5077         plane_info->visible = true;
5078         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5079
5080         plane_info->layer_index = plane_state->normalized_zpos;
5081
5082         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5083                                           &plane_info->color_space);
5084         if (ret)
5085                 return ret;
5086
5087         ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
5088                                            plane_info->rotation, tiling_flags,
5089                                            &plane_info->tiling_info,
5090                                            &plane_info->plane_size,
5091                                            &plane_info->dcc, address,
5092                                            tmz_surface, force_disable_dcc);
5093         if (ret)
5094                 return ret;
5095
5096         amdgpu_dm_plane_fill_blending_from_plane_state(
5097                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5098                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5099
5100         return 0;
5101 }
5102
5103 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5104                                     struct dc_plane_state *dc_plane_state,
5105                                     struct drm_plane_state *plane_state,
5106                                     struct drm_crtc_state *crtc_state)
5107 {
5108         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5109         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5110         struct dc_scaling_info scaling_info;
5111         struct dc_plane_info plane_info;
5112         int ret;
5113         bool force_disable_dcc = false;
5114
5115         ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
5116         if (ret)
5117                 return ret;
5118
5119         dc_plane_state->src_rect = scaling_info.src_rect;
5120         dc_plane_state->dst_rect = scaling_info.dst_rect;
5121         dc_plane_state->clip_rect = scaling_info.clip_rect;
5122         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5123
5124         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5125         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5126                                           afb->tiling_flags,
5127                                           &plane_info,
5128                                           &dc_plane_state->address,
5129                                           afb->tmz_surface,
5130                                           force_disable_dcc);
5131         if (ret)
5132                 return ret;
5133
        dc_plane_state->format = plane_info.format;
        dc_plane_state->color_space = plane_info.color_space;
5137         dc_plane_state->plane_size = plane_info.plane_size;
5138         dc_plane_state->rotation = plane_info.rotation;
5139         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5140         dc_plane_state->stereo_format = plane_info.stereo_format;
5141         dc_plane_state->tiling_info = plane_info.tiling_info;
5142         dc_plane_state->visible = plane_info.visible;
5143         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5144         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5145         dc_plane_state->global_alpha = plane_info.global_alpha;
5146         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5147         dc_plane_state->dcc = plane_info.dcc;
5148         dc_plane_state->layer_index = plane_info.layer_index;
5149         dc_plane_state->flip_int_enabled = true;
5150
5151         /*
5152          * Always set input transfer function, since plane state is refreshed
5153          * every time.
5154          */
5155         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
5156                                                 plane_state,
5157                                                 dc_plane_state);
5158         if (ret)
5159                 return ret;
5160
5161         return 0;
5162 }
5163
5164 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
                                      struct rect *dirty_rect, s32 x,
                                      s32 y, s32 width, s32 height,
5167                                       int *i, bool ffu)
5168 {
5169         WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
5170
5171         dirty_rect->x = x;
5172         dirty_rect->y = y;
5173         dirty_rect->width = width;
5174         dirty_rect->height = height;
5175
5176         if (ffu)
5177                 drm_dbg(plane->dev,
5178                         "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5179                         plane->base.id, width, height);
5180         else
5181                 drm_dbg(plane->dev,
5182                         "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
5183                         plane->base.id, x, y, width, height);
5184
5185         (*i)++;
5186 }
5187
5188 /**
5189  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5190  *
5191  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5192  *         remote fb
5193  * @old_plane_state: Old state of @plane
5194  * @new_plane_state: New state of @plane
5195  * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @dirty_regions_changed: set to true if the dirty regions have changed
5198  *
5199  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5200  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5201  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5202  * amdgpu_dm's.
5203  *
5204  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5205  * plane with regions that require flushing to the eDP remote buffer. In
5206  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5207  * implicitly provide damage clips without any client support via the plane
5208  * bounds.
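 *
 * When the damage cannot be expressed as dirty rects (plane rotation, no
 * usable clips, or more clips than DC_MAX_DIRTY_RECTS), the full CRTC area
 * is marked dirty instead and PSR SU falls back to a full-frame update.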
5209  */
5210 static void fill_dc_dirty_rects(struct drm_plane *plane,
5211                                 struct drm_plane_state *old_plane_state,
5212                                 struct drm_plane_state *new_plane_state,
5213                                 struct drm_crtc_state *crtc_state,
5214                                 struct dc_flip_addrs *flip_addrs,
5215                                 bool *dirty_regions_changed)
5216 {
5217         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5218         struct rect *dirty_rects = flip_addrs->dirty_rects;
5219         u32 num_clips;
5220         struct drm_mode_rect *clips;
5221         bool bb_changed;
5222         bool fb_changed;
        u32 i = 0;

        *dirty_regions_changed = false;
5225
        /*
         * The cursor plane has its own dirty rect update interface. See
         * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data.
         */
5230         if (plane->type == DRM_PLANE_TYPE_CURSOR)
5231                 return;
5232
5233         if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
5234                 goto ffu;
5235
5236         num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5237         clips = drm_plane_get_damage_clips(new_plane_state);
5238
5239         if (!dm_crtc_state->mpo_requested) {
5240                 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5241                         goto ffu;
5242
5243                 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5244                         fill_dc_dirty_rect(new_plane_state->plane,
5245                                            &dirty_rects[flip_addrs->dirty_rect_count],
5246                                            clips->x1, clips->y1,
5247                                            clips->x2 - clips->x1, clips->y2 - clips->y1,
5248                                            &flip_addrs->dirty_rect_count,
5249                                            false);
5250                 return;
5251         }
5252
5253         /*
5254          * MPO is requested. Add entire plane bounding box to dirty rects if
5255          * flipped to or damaged.
5256          *
5257          * If plane is moved or resized, also add old bounding box to dirty
5258          * rects.
5259          */
5260         fb_changed = old_plane_state->fb->base.id !=
5261                      new_plane_state->fb->base.id;
5262         bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5263                       old_plane_state->crtc_y != new_plane_state->crtc_y ||
5264                       old_plane_state->crtc_w != new_plane_state->crtc_w ||
5265                       old_plane_state->crtc_h != new_plane_state->crtc_h);
5266
5267         drm_dbg(plane->dev,
5268                 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5269                 new_plane_state->plane->base.id,
5270                 bb_changed, fb_changed, num_clips);
5271
5272         *dirty_regions_changed = bb_changed;
5273
5274         if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
5275                 goto ffu;
5276
5277         if (bb_changed) {
5278                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5279                                    new_plane_state->crtc_x,
5280                                    new_plane_state->crtc_y,
5281                                    new_plane_state->crtc_w,
5282                                    new_plane_state->crtc_h, &i, false);
5283
5284                 /* Add old plane bounding-box if plane is moved or resized */
5285                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5286                                    old_plane_state->crtc_x,
5287                                    old_plane_state->crtc_y,
5288                                    old_plane_state->crtc_w,
5289                                    old_plane_state->crtc_h, &i, false);
5290         }
5291
5292         if (num_clips) {
5293                 for (; i < num_clips; clips++)
5294                         fill_dc_dirty_rect(new_plane_state->plane,
5295                                            &dirty_rects[i], clips->x1,
5296                                            clips->y1, clips->x2 - clips->x1,
5297                                            clips->y2 - clips->y1, &i, false);
5298         } else if (fb_changed && !bb_changed) {
5299                 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5300                                    new_plane_state->crtc_x,
5301                                    new_plane_state->crtc_y,
5302                                    new_plane_state->crtc_w,
5303                                    new_plane_state->crtc_h, &i, false);
5304         }
5305
5306         flip_addrs->dirty_rect_count = i;
5307         return;
5308
5309 ffu:
5310         fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5311                            dm_crtc_state->base.mode.crtc_hdisplay,
5312                            dm_crtc_state->base.mode.crtc_vdisplay,
5313                            &flip_addrs->dirty_rect_count, true);
5314 }
5315
5316 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5317                                            const struct dm_connector_state *dm_state,
5318                                            struct dc_stream_state *stream)
5319 {
5320         enum amdgpu_rmx_type rmx_type;
5321
        struct rect src = { 0 }; /* viewport in composition space */
5323         struct rect dst = { 0 }; /* stream addressable area */
5324
        /* No mode - nothing to be done */
5326         if (!mode)
5327                 return;
5328
5329         /* Full screen scaling by default */
5330         src.width = mode->hdisplay;
5331         src.height = mode->vdisplay;
5332         dst.width = stream->timing.h_addressable;
5333         dst.height = stream->timing.v_addressable;
5334
5335         if (dm_state) {
5336                 rmx_type = dm_state->scaling;
5337                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
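                        /*
                         * Compare aspect ratios by cross-multiplying to
                         * avoid integer division: src.w/src.h < dst.w/dst.h
                         * iff src.w * dst.h < src.h * dst.w.
                         */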
5338                         if (src.width * dst.height <
5339                                         src.height * dst.width) {
5340                                 /* height needs less upscaling/more downscaling */
5341                                 dst.width = src.width *
5342                                                 dst.height / src.height;
5343                         } else {
5344                                 /* width needs less upscaling/more downscaling */
5345                                 dst.height = src.height *
5346                                                 dst.width / src.width;
5347                         }
5348                 } else if (rmx_type == RMX_CENTER) {
5349                         dst = src;
5350                 }
5351
5352                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5353                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5354
5355                 if (dm_state->underscan_enable) {
5356                         dst.x += dm_state->underscan_hborder / 2;
5357                         dst.y += dm_state->underscan_vborder / 2;
5358                         dst.width -= dm_state->underscan_hborder;
5359                         dst.height -= dm_state->underscan_vborder;
5360                 }
5361         }
5362
5363         stream->src = src;
5364         stream->dst = dst;
5365
5366         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5367                       dst.x, dst.y, dst.width, dst.height);
5368
5369 }
5370
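/*
 * Derive a dc_color_depth from the connector's EDID-reported bpc. For
 * YCbCr 4:2:0 the depth is capped by the HDMI HF-VSDB deep-colour bits;
 * otherwise the EDID bpc is used (8 if unspecified). The result is further
 * clamped to the user's requested max bpc and rounded down to an even value.
 */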
5371 static enum dc_color_depth
5372 convert_color_depth_from_display_info(const struct drm_connector *connector,
5373                                       bool is_y420, int requested_bpc)
5374 {
5375         u8 bpc;
5376
5377         if (is_y420) {
5378                 bpc = 8;
5379
5380                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5381                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5382                         bpc = 16;
5383                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5384                         bpc = 12;
5385                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5386                         bpc = 10;
5387         } else {
5388                 bpc = (uint8_t)connector->display_info.bpc;
5389                 /* Assume 8 bpc by default if no bpc is specified. */
5390                 bpc = bpc ? bpc : 8;
5391         }
5392
5393         if (requested_bpc > 0) {
5394                 /*
5395                  * Cap display bpc based on the user requested value.
5396                  *
5397                  * The value for state->max_bpc may not be correctly updated
5398                  * depending on when the connector gets added to the state
5399                  * or if this was called outside of atomic check, so it
5400                  * can't be used directly.
5401                  */
5402                 bpc = min_t(u8, bpc, requested_bpc);
5403
5404                 /* Round down to the nearest even number. */
5405                 bpc = bpc - (bpc & 1);
5406         }
5407
5408         switch (bpc) {
5409         case 0:
5410                 /*
5411                  * Temporary workaround: DRM doesn't parse color depth for
5412                  * EDID revisions before 1.4.
5413                  * TODO: Fix EDID parsing
5414                  */
5415                 return COLOR_DEPTH_888;
5416         case 6:
5417                 return COLOR_DEPTH_666;
5418         case 8:
5419                 return COLOR_DEPTH_888;
5420         case 10:
5421                 return COLOR_DEPTH_101010;
5422         case 12:
5423                 return COLOR_DEPTH_121212;
5424         case 14:
5425                 return COLOR_DEPTH_141414;
5426         case 16:
5427                 return COLOR_DEPTH_161616;
5428         default:
5429                 return COLOR_DEPTH_UNDEFINED;
5430         }
5431 }
5432
5433 static enum dc_aspect_ratio
5434 get_aspect_ratio(const struct drm_display_mode *mode_in)
5435 {
5436         /* 1-1 mapping, since both enums follow the HDMI spec. */
5437         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5438 }
5439
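/*
 * Pick the DC colour space from the connector's "Colorspace" property. With
 * the default colorimetry, RGB encodings map to sRGB while YCbCr picks
 * BT.709 or BT.601 based on the 27.03 MHz HDTV/SDTV pixel clock boundary.
 */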
5440 static enum dc_color_space
5441 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
5442                        const struct drm_connector_state *connector_state)
5443 {
5444         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5445
5446         switch (connector_state->colorspace) {
5447         case DRM_MODE_COLORIMETRY_BT601_YCC:
5448                 if (dc_crtc_timing->flags.Y_ONLY)
5449                         color_space = COLOR_SPACE_YCBCR601_LIMITED;
5450                 else
5451                         color_space = COLOR_SPACE_YCBCR601;
5452                 break;
5453         case DRM_MODE_COLORIMETRY_BT709_YCC:
5454                 if (dc_crtc_timing->flags.Y_ONLY)
5455                         color_space = COLOR_SPACE_YCBCR709_LIMITED;
5456                 else
5457                         color_space = COLOR_SPACE_YCBCR709;
5458                 break;
5459         case DRM_MODE_COLORIMETRY_OPRGB:
5460                 color_space = COLOR_SPACE_ADOBERGB;
5461                 break;
5462         case DRM_MODE_COLORIMETRY_BT2020_RGB:
5463         case DRM_MODE_COLORIMETRY_BT2020_YCC:
5464                 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
5465                         color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
5466                 else
5467                         color_space = COLOR_SPACE_2020_YCBCR;
5468                 break;
5469         case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
5470         default:
5471                 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
5472                         color_space = COLOR_SPACE_SRGB;
5473                 /*
5474                  * 27030 kHz (pix_clk_100hz > 270300) is the separation point
5475                  * between HDTV and SDTV according to the HDMI spec; we use
5476                  * YCbCr709 and YCbCr601 respectively.
5477                  */
5478                 } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
5479                         if (dc_crtc_timing->flags.Y_ONLY)
5480                                 color_space =
5481                                         COLOR_SPACE_YCBCR709_LIMITED;
5482                         else
5483                                 color_space = COLOR_SPACE_YCBCR709;
5484                 } else {
5485                         if (dc_crtc_timing->flags.Y_ONLY)
5486                                 color_space =
5487                                         COLOR_SPACE_YCBCR601_LIMITED;
5488                         else
5489                                 color_space = COLOR_SPACE_YCBCR601;
5490                 }
5491                 break;
5492         }
5493
5494         return color_space;
5495 }
5496
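/* 1:1 mapping of the DRM "content type" property onto DC's enum. */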
5497 static enum display_content_type
5498 get_output_content_type(const struct drm_connector_state *connector_state)
5499 {
5500         switch (connector_state->content_type) {
5501         default:
5502         case DRM_MODE_CONTENT_TYPE_NO_DATA:
5503                 return DISPLAY_CONTENT_TYPE_NO_DATA;
5504         case DRM_MODE_CONTENT_TYPE_GRAPHICS:
5505                 return DISPLAY_CONTENT_TYPE_GRAPHICS;
5506         case DRM_MODE_CONTENT_TYPE_PHOTO:
5507                 return DISPLAY_CONTENT_TYPE_PHOTO;
5508         case DRM_MODE_CONTENT_TYPE_CINEMA:
5509                 return DISPLAY_CONTENT_TYPE_CINEMA;
5510         case DRM_MODE_CONTENT_TYPE_GAME:
5511                 return DISPLAY_CONTENT_TYPE_GAME;
5512         }
5513 }
5514
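/*
 * Walk the colour depth down from the current value until the normalized
 * pixel clock fits within the sink's max TMDS clock (both in kHz). Deep
 * colour scales the clock by bpc / 24: e.g. a 297000 kHz clock at 10 bpc
 * normalizes to 297000 * 30 / 24 = 371250 kHz (example values are ours).
 */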
5515 static bool adjust_colour_depth_from_display_info(
5516         struct dc_crtc_timing *timing_out,
5517         const struct drm_display_info *info)
5518 {
5519         enum dc_color_depth depth = timing_out->display_color_depth;
5520         int normalized_clk;
5521
5522         do {
5523                 normalized_clk = timing_out->pix_clk_100hz / 10;
5524                 /* YCbCr 4:2:0 halves the effective pixel clock */
5525                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5526                         normalized_clk /= 2;
5527                 /* Adjust the pixel clock per the HDMI spec for the given colour depth */
5528                 switch (depth) {
5529                 case COLOR_DEPTH_888:
5530                         break;
5531                 case COLOR_DEPTH_101010:
5532                         normalized_clk = (normalized_clk * 30) / 24;
5533                         break;
5534                 case COLOR_DEPTH_121212:
5535                         normalized_clk = (normalized_clk * 36) / 24;
5536                         break;
5537                 case COLOR_DEPTH_161616:
5538                         normalized_clk = (normalized_clk * 48) / 24;
5539                         break;
5540                 default:
5541                         /* The above depths are the only ones valid for HDMI. */
5542                         return false;
5543                 }
5544                 if (normalized_clk <= info->max_tmds_clock) {
5545                         timing_out->display_color_depth = depth;
5546                         return true;
5547                 }
5548         } while (--depth > COLOR_DEPTH_666);
5549         return false;
5550 }
5551
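/*
 * Translate a drm_display_mode and connector state into DC CRTC timing:
 * pixel encoding (YCbCr 4:2:0/4:4:4 or RGB), colour depth, VIC, sync
 * polarities and the h/v timing values. FreeSync video modes use the base
 * timings; everything else uses the crtc_* (hardware) timings.
 */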
5552 static void fill_stream_properties_from_drm_display_mode(
5553         struct dc_stream_state *stream,
5554         const struct drm_display_mode *mode_in,
5555         const struct drm_connector *connector,
5556         const struct drm_connector_state *connector_state,
5557         const struct dc_stream_state *old_stream,
5558         int requested_bpc)
5559 {
5560         struct dc_crtc_timing *timing_out = &stream->timing;
5561         const struct drm_display_info *info = &connector->display_info;
5562         struct amdgpu_dm_connector *aconnector = NULL;
5563         struct hdmi_vendor_infoframe hv_frame;
5564         struct hdmi_avi_infoframe avi_frame;
5565
5566         if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
5567                 aconnector = to_amdgpu_dm_connector(connector);
5568
5569         memset(&hv_frame, 0, sizeof(hv_frame));
5570         memset(&avi_frame, 0, sizeof(avi_frame));
5571
5572         timing_out->h_border_left = 0;
5573         timing_out->h_border_right = 0;
5574         timing_out->v_border_top = 0;
5575         timing_out->v_border_bottom = 0;
5576         /* TODO: un-hardcode */
5577         if (drm_mode_is_420_only(info, mode_in)
5578                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5579                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5580         else if (drm_mode_is_420_also(info, mode_in)
5581                         && aconnector
5582                         && aconnector->force_yuv420_output)
5583                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5584         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5585                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5586                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5587         else
5588                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5589
5590         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5591         timing_out->display_color_depth = convert_color_depth_from_display_info(
5592                 connector,
5593                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5594                 requested_bpc);
5595         timing_out->scan_type = SCANNING_TYPE_NODATA;
5596         timing_out->hdmi_vic = 0;
5597
5598         if (old_stream) {
5599                 timing_out->vic = old_stream->timing.vic;
5600                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5601                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5602         } else {
5603                 timing_out->vic = drm_match_cea_mode(mode_in);
5604                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5605                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5606                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5607                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5608         }
5609
5610         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5611                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5612                 timing_out->vic = avi_frame.video_code;
5613                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5614                 timing_out->hdmi_vic = hv_frame.vic;
5615         }
5616
5617         if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
5618                 timing_out->h_addressable = mode_in->hdisplay;
5619                 timing_out->h_total = mode_in->htotal;
5620                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5621                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5622                 timing_out->v_total = mode_in->vtotal;
5623                 timing_out->v_addressable = mode_in->vdisplay;
5624                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5625                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5626                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5627         } else {
5628                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5629                 timing_out->h_total = mode_in->crtc_htotal;
5630                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5631                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5632                 timing_out->v_total = mode_in->crtc_vtotal;
5633                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5634                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5635                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5636                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5637         }
5638
5639         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5640
5641         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5642         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5643         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5644                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5645                     drm_mode_is_420_also(info, mode_in) &&
5646                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5647                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5648                         adjust_colour_depth_from_display_info(timing_out, info);
5649                 }
5650         }
5651
5652         stream->output_color_space = get_output_color_space(timing_out, connector_state);
5653         stream->content_type = get_output_content_type(connector_state);
5654 }
5655
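/*
 * Copy the sink's EDID audio capabilities (valid for CEA revision >= 3)
 * and latency info into the stream's audio_info.
 */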
5656 static void fill_audio_info(struct audio_info *audio_info,
5657                             const struct drm_connector *drm_connector,
5658                             const struct dc_sink *dc_sink)
5659 {
5660         int i = 0;
5661         int cea_revision = 0;
5662         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5663
5664         audio_info->manufacture_id = edid_caps->manufacturer_id;
5665         audio_info->product_id = edid_caps->product_id;
5666
5667         cea_revision = drm_connector->display_info.cea_rev;
5668
5669         strscpy(audio_info->display_name,
5670                 edid_caps->display_name,
5671                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5672
5673         if (cea_revision >= 3) {
5674                 audio_info->mode_count = edid_caps->audio_mode_count;
5675
5676                 for (i = 0; i < audio_info->mode_count; ++i) {
5677                         audio_info->modes[i].format_code =
5678                                         (enum audio_format_code)
5679                                         (edid_caps->audio_modes[i].format_code);
5680                         audio_info->modes[i].channel_count =
5681                                         edid_caps->audio_modes[i].channel_count;
5682                         audio_info->modes[i].sample_rates.all =
5683                                         edid_caps->audio_modes[i].sample_rate;
5684                         audio_info->modes[i].sample_size =
5685                                         edid_caps->audio_modes[i].sample_size;
5686                 }
5687         }
5688
5689         audio_info->flags.all = edid_caps->speaker_flags;
5690
5691         /* TODO: We only check progressive mode; check interlaced mode too */
5692         if (drm_connector->latency_present[0]) {
5693                 audio_info->video_latency = drm_connector->video_latency[0];
5694                 audio_info->audio_latency = drm_connector->audio_latency[0];
5695         }
5696
5697         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5698
5699 }
5700
5701 static void
5702 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5703                                       struct drm_display_mode *dst_mode)
5704 {
5705         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5706         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5707         dst_mode->crtc_clock = src_mode->crtc_clock;
5708         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5709         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5710         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5711         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5712         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5713         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5714         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5715         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5716         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5717         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5718         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5719 }
5720
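/*
 * Patch the mode's crtc_* timings with the native mode's when scaling is
 * enabled, or when the requested mode matches the native clock, htotal and
 * vtotal anyway; otherwise leave them untouched.
 */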
5721 static void
5722 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5723                                         const struct drm_display_mode *native_mode,
5724                                         bool scale_enabled)
5725 {
5726         if (scale_enabled) {
5727                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5728         } else if (native_mode->clock == drm_mode->clock &&
5729                         native_mode->htotal == drm_mode->htotal &&
5730                         native_mode->vtotal == drm_mode->vtotal) {
5731                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5732         } else {
5733                 /* neither scaling nor an amdgpu-inserted mode; nothing to patch */
5734         }
5735 }
5736
5737 static struct dc_sink *
5738 create_fake_sink(struct dc_link *link)
5739 {
5740         struct dc_sink_init_data sink_init_data = { 0 };
5741         struct dc_sink *sink = NULL;
5742
5743         sink_init_data.link = link;
5744         sink_init_data.sink_signal = link->connector_signal;
5745
5746         sink = dc_sink_create(&sink_init_data);
5747         if (!sink) {
5748                 DRM_ERROR("Failed to create sink!\n");
5749                 return NULL;
5750         }
5751         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5752
5753         return sink;
5754 }
5755
5756 static void set_multisync_trigger_params(
5757                 struct dc_stream_state *stream)
5758 {
5759         struct dc_stream_state *master = NULL;
5760
5761         if (stream->triggered_crtc_reset.enabled) {
5762                 master = stream->triggered_crtc_reset.event_source;
5763                 stream->triggered_crtc_reset.event =
5764                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5765                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5766                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5767         }
5768 }
5769
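/*
 * Pick the stream with the highest refresh rate as the multisync master.
 * The rate follows from refresh = pix_clk / (h_total * v_total): e.g.
 * 148.5 MHz (pix_clk_100hz = 1485000) over 2200 x 1125 gives
 * 1485000 * 100 / (2200 * 1125) = 60 Hz (example values are ours).
 */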
5770 static void set_master_stream(struct dc_stream_state *stream_set[],
5771                               int stream_count)
5772 {
5773         int j, highest_rfr = 0, master_stream = 0;
5774
5775         for (j = 0;  j < stream_count; j++) {
5776                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5777                         int refresh_rate = 0;
5778
5779                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5780                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5781                         if (refresh_rate > highest_rfr) {
5782                                 highest_rfr = refresh_rate;
5783                                 master_stream = j;
5784                         }
5785                 }
5786         }
5787         for (j = 0;  j < stream_count; j++) {
5788                 if (stream_set[j])
5789                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5790         }
5791 }
5792
5793 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5794 {
5795         int i = 0;
5796         struct dc_stream_state *stream;
5797
5798         if (context->stream_count < 2)
5799                 return;
5800         for (i = 0; i < context->stream_count ; i++) {
5801                 if (!context->streams[i])
5802                         continue;
5803                 /*
5804                  * TODO: add a function to read AMD VSDB bits and set
5805                  * the crtc_sync_master.multi_sync_enabled flag.
5806                  * For now it's set to false.
5807                  */
5808         }
5809
5810         set_master_stream(context->streams, context->stream_count);
5811
5812         for (i = 0; i < context->stream_count ; i++) {
5813                 stream = context->streams[i];
5814
5815                 if (!stream)
5816                         continue;
5817
5818                 set_multisync_trigger_params(stream);
5819         }
5820 }
5821
5822 /**
5823  * DOC: FreeSync Video
5824  *
5825  * When a userspace application wants to play a video, the content follows a
5826  * standard format definition that usually specifies the FPS for that format.
5827  * The list below illustrates some common video formats and their
5828  * expected FPS:
5829  *
5830  * - TV/NTSC (23.976 FPS)
5831  * - Cinema (24 FPS)
5832  * - TV/PAL (25 FPS)
5833  * - TV/NTSC (29.97 FPS)
5834  * - TV/NTSC (30 FPS)
5835  * - Cinema HFR (48 FPS)
5836  * - TV/PAL (50 FPS)
5837  * - Commonly used (60 FPS)
5838  * - Multiples of 24 (48,72,96 FPS)
5839  *
5840  * The list of standard video formats is not huge and can be added to the
5841  * connector modeset list beforehand. With that, userspace can leverage
5842  * FreeSync to extend the front porch in order to attain the target refresh
5843  * rate. Such a switch will happen seamlessly, without screen blanking or
5844  * reprogramming of the output in any other way. If userspace requests a
5845  * modesetting change compatible with FreeSync modes that only differ in the
5846  * refresh rate, DC will skip the full update and avoid any blink during the
5847  * transition. For example, a video player can change the modesetting from
5848  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5849  * causing any display blink. This same concept can be applied to a mode
5850  * setting change.
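 *
 * As a rough illustration (numbers are ours, not from any spec): the refresh
 * rate equals pix_clk / (htotal * vtotal). Keeping the pixel clock and
 * horizontal timing fixed, a 60Hz mode is retargeted to 48Hz by stretching
 * the vertical front porch until vtotal grows by a factor of 60/48 = 1.25.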
5851  */
5852 static struct drm_display_mode *
5853 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5854                 bool use_probed_modes)
5855 {
5856         struct drm_display_mode *m, *m_pref = NULL;
5857         u16 current_refresh, highest_refresh;
5858         struct list_head *list_head = use_probed_modes ?
5859                 &aconnector->base.probed_modes :
5860                 &aconnector->base.modes;
5861
5862         if (aconnector->freesync_vid_base.clock != 0)
5863                 return &aconnector->freesync_vid_base;
5864
5865         /* Find the preferred mode */
5866         list_for_each_entry(m, list_head, head) {
5867                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5868                         m_pref = m;
5869                         break;
5870                 }
5871         }
5872
5873         if (!m_pref) {
5874                 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5875                 m_pref = list_first_entry_or_null(
5876                                 &aconnector->base.modes, struct drm_display_mode, head);
5877                 if (!m_pref) {
5878                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5879                         return NULL;
5880                 }
5881         }
5882
5883         highest_refresh = drm_mode_vrefresh(m_pref);
5884
5885         /*
5886          * Find the mode with the highest refresh rate at the same resolution.
5887          * For some monitors, the preferred mode is not the mode with the
5888          * highest supported refresh rate.
5889          */
5890         list_for_each_entry(m, list_head, head) {
5891                 current_refresh  = drm_mode_vrefresh(m);
5892
5893                 if (m->hdisplay == m_pref->hdisplay &&
5894                     m->vdisplay == m_pref->vdisplay &&
5895                     highest_refresh < current_refresh) {
5896                         highest_refresh = current_refresh;
5897                         m_pref = m;
5898                 }
5899         }
5900
5901         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5902         return m_pref;
5903 }
5904
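/*
 * A mode qualifies as a FreeSync video mode if it matches the highest
 * refresh rate base mode in everything except vtotal, with the whole
 * difference absorbed by the vertical front porch (vsync start/end shift
 * by exactly the vtotal delta).
 */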
5905 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5906                 struct amdgpu_dm_connector *aconnector)
5907 {
5908         struct drm_display_mode *high_mode;
5909         int timing_diff;
5910
5911         high_mode = get_highest_refresh_rate_mode(aconnector, false);
5912         if (!high_mode || !mode)
5913                 return false;
5914
5915         timing_diff = high_mode->vtotal - mode->vtotal;
5916
5917         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5918             high_mode->hdisplay != mode->hdisplay ||
5919             high_mode->vdisplay != mode->vdisplay ||
5920             high_mode->hsync_start != mode->hsync_start ||
5921             high_mode->hsync_end != mode->hsync_end ||
5922             high_mode->htotal != mode->htotal ||
5923             high_mode->hskew != mode->hskew ||
5924             high_mode->vscan != mode->vscan ||
5925             high_mode->vsync_start - mode->vsync_start != timing_diff ||
5926             high_mode->vsync_end - mode->vsync_end != timing_diff)
5927                 return false;
5928         else
5929                 return true;
5930 }
5931
5932 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5933                             struct dc_sink *sink, struct dc_stream_state *stream,
5934                             struct dsc_dec_dpcd_caps *dsc_caps)
5935 {
5936         stream->timing.flags.DSC = 0;
5937         dsc_caps->is_dsc_supported = false;
5938
5939         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5940             sink->sink_signal == SIGNAL_TYPE_EDP)) {
5941                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5942                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5943                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5944                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5945                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5946                                 dsc_caps);
5947         }
5948 }
5949
5950
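/*
 * eDP DSC policy: if the stream's DSC bandwidth range already fits within
 * the link bandwidth, use an unconstrained config pinned at the max eDP
 * bpp; otherwise compute a config targeted at the available link bandwidth.
 */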
5951 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5952                                     struct dc_sink *sink, struct dc_stream_state *stream,
5953                                     struct dsc_dec_dpcd_caps *dsc_caps,
5954                                     uint32_t max_dsc_target_bpp_limit_override)
5955 {
5956         const struct dc_link_settings *verified_link_cap = NULL;
5957         u32 link_bw_in_kbps;
5958         u32 edp_min_bpp_x16, edp_max_bpp_x16;
5959         struct dc *dc = sink->ctx->dc;
5960         struct dc_dsc_bw_range bw_range = {0};
5961         struct dc_dsc_config dsc_cfg = {0};
5962         struct dc_dsc_config_options dsc_options = {0};
5963
5964         dc_dsc_get_default_config_option(dc, &dsc_options);
5965         dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
5966
5967         verified_link_cap = dc_link_get_link_cap(stream->link);
5968         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5969         edp_min_bpp_x16 = 8 * 16;
5970         edp_max_bpp_x16 = 8 * 16;
5971
5972         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5973                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5974
5975         if (edp_max_bpp_x16 < edp_min_bpp_x16)
5976                 edp_min_bpp_x16 = edp_max_bpp_x16;
5977
5978         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5979                                 dc->debug.dsc_min_slice_height_override,
5980                                 edp_min_bpp_x16, edp_max_bpp_x16,
5981                                 dsc_caps,
5982                                 &stream->timing,
5983                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
5984                                 &bw_range)) {
5985
5986                 if (bw_range.max_kbps < link_bw_in_kbps) {
5987                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5988                                         dsc_caps,
5989                                         &dsc_options,
5990                                         0,
5991                                         &stream->timing,
5992                                         dc_link_get_highest_encoding_format(aconnector->dc_link),
5993                                         &dsc_cfg)) {
5994                                 stream->timing.dsc_cfg = dsc_cfg;
5995                                 stream->timing.flags.DSC = 1;
5996                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5997                         }
5998                         return;
5999                 }
6000         }
6001
6002         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6003                                 dsc_caps,
6004                                 &dsc_options,
6005                                 link_bw_in_kbps,
6006                                 &stream->timing,
6007                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
6008                                 &dsc_cfg)) {
6009                 stream->timing.dsc_cfg = dsc_cfg;
6010                 stream->timing.flags.DSC = 1;
6011         }
6012 }
6013
6014
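/*
 * SST DSC policy: eDP panels take the dedicated path above; native DP SST
 * sinks get DSC when the timing needs it for the link bandwidth; DP-HDMI
 * PCONs get DSC when the timing exceeds what the link can carry. debugfs
 * settings can then force-enable DSC and override slice counts and bpp.
 */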
6015 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6016                                         struct dc_sink *sink, struct dc_stream_state *stream,
6017                                         struct dsc_dec_dpcd_caps *dsc_caps)
6018 {
6019         struct drm_connector *drm_connector = &aconnector->base;
6020         u32 link_bandwidth_kbps;
6021         struct dc *dc = sink->ctx->dc;
6022         u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
6023         u32 dsc_max_supported_bw_in_kbps;
6024         u32 max_dsc_target_bpp_limit_override =
6025                 drm_connector->display_info.max_dsc_bpp;
6026         struct dc_dsc_config_options dsc_options = {0};
6027
6028         dc_dsc_get_default_config_option(dc, &dsc_options);
6029         dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
6030
6031         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6032                                                         dc_link_get_link_cap(aconnector->dc_link));
6033
6034         /* Set DSC policy according to dsc_clock_en */
6035         dc_dsc_policy_set_enable_dsc_when_not_needed(
6036                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6037
6038         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
6039             !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
6040             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6041
6042                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6043
6044         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6045                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6046                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6047                                                 dsc_caps,
6048                                                 &dsc_options,
6049                                                 link_bandwidth_kbps,
6050                                                 &stream->timing,
6051                                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
6052                                                 &stream->timing.dsc_cfg)) {
6053                                 stream->timing.flags.DSC = 1;
6054                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6055                         }
6056                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6057                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
6058                                         dc_link_get_highest_encoding_format(aconnector->dc_link));
6059                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6060                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6061
6062                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6063                                         max_supported_bw_in_kbps > 0 &&
6064                                         dsc_max_supported_bw_in_kbps > 0 &&
6065                                         dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6066                                                 dsc_caps,
6067                                                 &dsc_options,
6068                                                 dsc_max_supported_bw_in_kbps,
6069                                                 &stream->timing,
6070                                                 dc_link_get_highest_encoding_format(aconnector->dc_link),
6071                                                 &stream->timing.dsc_cfg)) {
6072                                 stream->timing.flags.DSC = 1;
6073                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6074                                                  __func__, drm_connector->name);
6075                         }
6076                 }
6077         }
6078
6079         /* Overwrite the stream flag if DSC is enabled through debugfs */
6080         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6081                 stream->timing.flags.DSC = 1;
6082
6083         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6084                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6085
6086         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6087                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6088
6089         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6090                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6091 }
6092
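/*
 * Build a dc_stream_state for the connector: resolve the link and sink
 * (creating a fake sink for headless or writeback cases), fill the timing
 * from the chosen mode (FreeSync video modes recalculate it), apply DSC
 * policy and scaling, and build the VSC/VSIF info packets.
 */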
6093 static struct dc_stream_state *
6094 create_stream_for_sink(struct drm_connector *connector,
6095                        const struct drm_display_mode *drm_mode,
6096                        const struct dm_connector_state *dm_state,
6097                        const struct dc_stream_state *old_stream,
6098                        int requested_bpc)
6099 {
6100         struct amdgpu_dm_connector *aconnector = NULL;
6101         struct drm_display_mode *preferred_mode = NULL;
6102         const struct drm_connector_state *con_state = &dm_state->base;
6103         struct dc_stream_state *stream = NULL;
6104         struct drm_display_mode mode;
6105         struct drm_display_mode saved_mode;
6106         struct drm_display_mode *freesync_mode = NULL;
6107         bool native_mode_found = false;
6108         bool recalculate_timing = false;
6109         bool scale = dm_state->scaling != RMX_OFF;
6110         int mode_refresh;
6111         int preferred_refresh = 0;
6112         enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
6113         struct dsc_dec_dpcd_caps dsc_caps;
6114
6115         struct dc_link *link = NULL;
6116         struct dc_sink *sink = NULL;
6117
6118         drm_mode_init(&mode, drm_mode);
6119         memset(&saved_mode, 0, sizeof(saved_mode));
6120
6121         if (connector == NULL) {
6122                 DRM_ERROR("connector is NULL!\n");
6123                 return stream;
6124         }
6125
6126         if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
6127                 /* Writeback connectors, handled below, leave aconnector NULL */
6128                 aconnector = to_amdgpu_dm_connector(connector);
6129                 link = aconnector->dc_link;
6130         } else {
6131                 struct drm_writeback_connector *wbcon = NULL;
6132                 struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
6133
6134                 wbcon = drm_connector_to_writeback(connector);
6135                 dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
6136                 link = dm_wbcon->link;
6137         }
6138
6139         if (!aconnector || !aconnector->dc_sink) {
6140                 sink = create_fake_sink(link);
6141                 if (!sink)
6142                         return stream;
6143
6144         } else {
6145                 sink = aconnector->dc_sink;
6146                 dc_sink_retain(sink);
6147         }
6148
6149         stream = dc_create_stream_for_sink(sink);
6150
6151         if (stream == NULL) {
6152                 DRM_ERROR("Failed to create stream for sink!\n");
6153                 goto finish;
6154         }
6155
6156         /* We leave this NULL for writeback connectors */
6157         stream->dm_stream_context = aconnector;
6158
6159         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6160                 connector->display_info.hdmi.scdc.scrambling.low_rates;
6161
6162         list_for_each_entry(preferred_mode, &connector->modes, head) {
6163                 /* Search for preferred mode */
6164                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6165                         native_mode_found = true;
6166                         break;
6167                 }
6168         }
6169         if (!native_mode_found)
6170                 preferred_mode = list_first_entry_or_null(
6171                                 &connector->modes,
6172                                 struct drm_display_mode,
6173                                 head);
6174
6175         mode_refresh = drm_mode_vrefresh(&mode);
6176
6177         if (preferred_mode == NULL) {
6178                 /*
6179                  * This may not be an error; the use case is when we have no
6180                  * usermode calls to reset and set mode upon hotplug. In this
6181                  * case, we call set mode ourselves to restore the previous mode
6182                  * and the modelist may not be filled in time.
6183                  */
6184                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6185         } else if (aconnector) {
6186                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6187                 if (recalculate_timing) {
6188                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6189                         drm_mode_copy(&saved_mode, &mode);
6190                         drm_mode_copy(&mode, freesync_mode);
6191                 } else {
6192                         decide_crtc_timing_for_drm_display_mode(
6193                                         &mode, preferred_mode, scale);
6194
6195                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6196                 }
6197         }
6198
6199         if (recalculate_timing)
6200                 drm_mode_set_crtcinfo(&saved_mode, 0);
6201
6202         /*
6203          * If scaling is enabled and the refresh rate didn't change,
6204          * we copy the vic and polarities of the old timings.
6205          */
6206         if (!scale || mode_refresh != preferred_refresh)
6207                 fill_stream_properties_from_drm_display_mode(
6208                         stream, &mode, connector, con_state, NULL,
6209                         requested_bpc);
6210         else
6211                 fill_stream_properties_from_drm_display_mode(
6212                         stream, &mode, connector, con_state, old_stream,
6213                         requested_bpc);
6214
6215         /* The rest isn't needed for writeback connectors */
6216         if (!aconnector)
6217                 goto finish;
6218
6219         if (aconnector->timing_changed) {
6220                 drm_dbg(aconnector->base.dev,
6221                         "overriding timing for automated test, bpc %d, changing to %d\n",
6222                         stream->timing.display_color_depth,
6223                         aconnector->timing_requested->display_color_depth);
6224                 stream->timing = *aconnector->timing_requested;
6225         }
6226
6227         /* SST DSC determination policy */
6228         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6229         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6230                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6231
6232         update_stream_scaling_settings(&mode, dm_state, stream);
6233
6234         fill_audio_info(
6235                 &stream->audio_info,
6236                 connector,
6237                 sink);
6238
6239         update_stream_signal(stream, sink);
6240
6241         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6242                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6243         else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
6244                          stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
6245                          stream->signal == SIGNAL_TYPE_EDP) {
6246                 //
6247                 // should decide stream support vsc sdp colorimetry capability
6248                 // before building vsc info packet
6249                 //
6250                 stream->use_vsc_sdp_for_colorimetry = false;
6251                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6252                         stream->use_vsc_sdp_for_colorimetry =
6253                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6254                 } else {
6255                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6256                                 stream->use_vsc_sdp_for_colorimetry = true;
6257                 }
6258                 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
6259                         tf = TRANSFER_FUNC_GAMMA_22;
6260                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
6261
6262                 if (stream->link->psr_settings.psr_feature_enabled)
6263                         aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6264         }
6265 finish:
6266         dc_sink_release(sink);
6267
6268         return stream;
6269 }
6270
6271 static enum drm_connector_status
6272 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6273 {
6274         bool connected;
6275         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6276
6277         /*
6278          * Notes:
6279          * 1. This interface is NOT called in context of HPD irq.
6280          * 2. This interface *is called* in the context of a user-mode ioctl,
6281          * which makes it a bad place for *any* MST-related activity.
6282          */
6283
6284         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6285             !aconnector->fake_enable)
6286                 connected = (aconnector->dc_sink != NULL);
6287         else
6288                 connected = (aconnector->base.force == DRM_FORCE_ON ||
6289                                 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
6290
6291         update_subconnector_property(aconnector);
6292
6293         return (connected ? connector_status_connected :
6294                         connector_status_disconnected);
6295 }
6296
6297 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6298                                             struct drm_connector_state *connector_state,
6299                                             struct drm_property *property,
6300                                             uint64_t val)
6301 {
6302         struct drm_device *dev = connector->dev;
6303         struct amdgpu_device *adev = drm_to_adev(dev);
6304         struct dm_connector_state *dm_old_state =
6305                 to_dm_connector_state(connector->state);
6306         struct dm_connector_state *dm_new_state =
6307                 to_dm_connector_state(connector_state);
6308
6309         int ret = -EINVAL;
6310
6311         if (property == dev->mode_config.scaling_mode_property) {
6312                 enum amdgpu_rmx_type rmx_type;
6313
6314                 switch (val) {
6315                 case DRM_MODE_SCALE_CENTER:
6316                         rmx_type = RMX_CENTER;
6317                         break;
6318                 case DRM_MODE_SCALE_ASPECT:
6319                         rmx_type = RMX_ASPECT;
6320                         break;
6321                 case DRM_MODE_SCALE_FULLSCREEN:
6322                         rmx_type = RMX_FULL;
6323                         break;
6324                 case DRM_MODE_SCALE_NONE:
6325                 default:
6326                         rmx_type = RMX_OFF;
6327                         break;
6328                 }
6329
6330                 if (dm_old_state->scaling == rmx_type)
6331                         return 0;
6332
6333                 dm_new_state->scaling = rmx_type;
6334                 ret = 0;
6335         } else if (property == adev->mode_info.underscan_hborder_property) {
6336                 dm_new_state->underscan_hborder = val;
6337                 ret = 0;
6338         } else if (property == adev->mode_info.underscan_vborder_property) {
6339                 dm_new_state->underscan_vborder = val;
6340                 ret = 0;
6341         } else if (property == adev->mode_info.underscan_property) {
6342                 dm_new_state->underscan_enable = val;
6343                 ret = 0;
6344         } else if (property == adev->mode_info.abm_level_property) {
6345                 dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
6346                 ret = 0;
6347         }
6348
6349         return ret;
6350 }
6351
6352 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6353                                             const struct drm_connector_state *state,
6354                                             struct drm_property *property,
6355                                             uint64_t *val)
6356 {
6357         struct drm_device *dev = connector->dev;
6358         struct amdgpu_device *adev = drm_to_adev(dev);
6359         struct dm_connector_state *dm_state =
6360                 to_dm_connector_state(state);
6361         int ret = -EINVAL;
6362
6363         if (property == dev->mode_config.scaling_mode_property) {
6364                 switch (dm_state->scaling) {
6365                 case RMX_CENTER:
6366                         *val = DRM_MODE_SCALE_CENTER;
6367                         break;
6368                 case RMX_ASPECT:
6369                         *val = DRM_MODE_SCALE_ASPECT;
6370                         break;
6371                 case RMX_FULL:
6372                         *val = DRM_MODE_SCALE_FULLSCREEN;
6373                         break;
6374                 case RMX_OFF:
6375                 default:
6376                         *val = DRM_MODE_SCALE_NONE;
6377                         break;
6378                 }
6379                 ret = 0;
6380         } else if (property == adev->mode_info.underscan_hborder_property) {
6381                 *val = dm_state->underscan_hborder;
6382                 ret = 0;
6383         } else if (property == adev->mode_info.underscan_vborder_property) {
6384                 *val = dm_state->underscan_vborder;
6385                 ret = 0;
6386         } else if (property == adev->mode_info.underscan_property) {
6387                 *val = dm_state->underscan_enable;
6388                 ret = 0;
6389         } else if (property == adev->mode_info.abm_level_property) {
6390                 *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
6391                         dm_state->abm_level : 0;
6392                 ret = 0;
6393         }
6394
6395         return ret;
6396 }
6397
6398 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6399 {
6400         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6401
6402         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6403 }
6404
6405 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6406 {
6407         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6408         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6409         struct amdgpu_display_manager *dm = &adev->dm;
6410
6411         /*
6412          * Only call this if the mst_mgr was initialized before, since this
6413          * is not done for all connector types.
6414          */
6415         if (aconnector->mst_mgr.dev)
6416                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6417
6418         if (aconnector->bl_idx != -1) {
6419                 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
6420                 dm->backlight_dev[aconnector->bl_idx] = NULL;
6421         }
6422
6423         if (aconnector->dc_em_sink)
6424                 dc_sink_release(aconnector->dc_em_sink);
6425         aconnector->dc_em_sink = NULL;
6426         if (aconnector->dc_sink)
6427                 dc_sink_release(aconnector->dc_sink);
6428         aconnector->dc_sink = NULL;
6429
6430         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6431         drm_connector_unregister(connector);
6432         drm_connector_cleanup(connector);
6433         if (aconnector->i2c) {
6434                 i2c_del_adapter(&aconnector->i2c->base);
6435                 kfree(aconnector->i2c);
6436         }
6437         kfree(aconnector->dm_dp_aux.aux.name);
6438
6439         kfree(connector);
6440 }
6441
6442 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6443 {
6444         struct dm_connector_state *state =
6445                 to_dm_connector_state(connector->state);
6446
6447         if (connector->state)
6448                 __drm_atomic_helper_connector_destroy_state(connector->state);
6449
6450         kfree(state);
6451
6452         state = kzalloc(sizeof(*state), GFP_KERNEL);
6453
6454         if (state) {
6455                 state->scaling = RMX_OFF;
6456                 state->underscan_enable = false;
6457                 state->underscan_hborder = 0;
6458                 state->underscan_vborder = 0;
6459                 state->base.max_requested_bpc = 8;
6460                 state->vcpi_slots = 0;
6461                 state->pbn = 0;
6462
6463                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6464                         state->abm_level = amdgpu_dm_abm_level ?:
6465                                 ABM_LEVEL_IMMEDIATE_DISABLE;
6466
6467                 __drm_atomic_helper_connector_reset(connector, &state->base);
6468         }
6469 }
6470
6471 struct drm_connector_state *
6472 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6473 {
6474         struct dm_connector_state *state =
6475                 to_dm_connector_state(connector->state);
6476
6477         struct dm_connector_state *new_state =
6478                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6479
6480         if (!new_state)
6481                 return NULL;
6482
6483         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6484
6485         new_state->freesync_capable = state->freesync_capable;
6486         new_state->abm_level = state->abm_level;
6487         new_state->scaling = state->scaling;
6488         new_state->underscan_enable = state->underscan_enable;
6489         new_state->underscan_hborder = state->underscan_hborder;
6490         new_state->underscan_vborder = state->underscan_vborder;
6491         new_state->vcpi_slots = state->vcpi_slots;
6492         new_state->pbn = state->pbn;
6493         return &new_state->base;
6494 }
6495
6496 static int
6497 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6498 {
6499         struct amdgpu_dm_connector *amdgpu_dm_connector =
6500                 to_amdgpu_dm_connector(connector);
6501         int r;
6502
6503         amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
6504
6505         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6506             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6507                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6508                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6509                 if (r)
6510                         return r;
6511         }
6512
6513 #if defined(CONFIG_DEBUG_FS)
6514         connector_debugfs_init(amdgpu_dm_connector);
6515 #endif
6516
6517         return 0;
6518 }
6519
6520 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
6521 {
6522         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6523         struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
6524         struct dc_link *dc_link = aconnector->dc_link;
6525         struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
6526         struct edid *edid;
6527
6528         /*
6529          * Note: drm_get_edid gets edid in the following order:
6530          * 1) override EDID if set via edid_override debugfs,
6531          * 2) firmware EDID if set via edid_firmware module parameter,
6532          * 3) regular DDC read.
6533          */
6534         edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
6535         if (!edid) {
6536                 DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
6537                 return;
6538         }
6539
6540         aconnector->edid = edid;
6541
6542         /* Update emulated (virtual) sink's EDID */
6543         if (dc_em_sink && dc_link) {
6544                 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
6545                 memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
6546                 dm_helpers_parse_edid_caps(
6547                         dc_link,
6548                         &dc_em_sink->dc_edid,
6549                         &dc_em_sink->edid_caps);
6550         }
6551 }
6552
6553 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6554         .reset = amdgpu_dm_connector_funcs_reset,
6555         .detect = amdgpu_dm_connector_detect,
6556         .fill_modes = drm_helper_probe_single_connector_modes,
6557         .destroy = amdgpu_dm_connector_destroy,
6558         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6559         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6560         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6561         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6562         .late_register = amdgpu_dm_connector_late_register,
6563         .early_unregister = amdgpu_dm_connector_unregister,
6564         .force = amdgpu_dm_connector_funcs_force
6565 };
6566
6567 static int get_modes(struct drm_connector *connector)
6568 {
6569         return amdgpu_dm_connector_get_modes(connector);
6570 }
6571
6572 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6573 {
6574         struct drm_connector *connector = &aconnector->base;
6575         struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
6576         struct dc_sink_init_data init_params = {
6577                         .link = aconnector->dc_link,
6578                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6579         };
6580         struct edid *edid;
6581
6582         /*
6583          * Note: drm_get_edid gets edid in the following order:
6584          * 1) override EDID if set via edid_override debugfs,
6585          * 2) firmware EDID if set via edid_firmware module parameter,
6586          * 3) regular DDC read.
6587          */
6588         edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
6589         if (!edid) {
6590                 DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
6591                 return;
6592         }
6593
6594         if (drm_detect_hdmi_monitor(edid))
6595                 init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
6596
6597         aconnector->edid = edid;
6598
6599         aconnector->dc_em_sink = dc_link_add_remote_sink(
6600                 aconnector->dc_link,
6601                 (uint8_t *)edid,
6602                 (edid->extensions + 1) * EDID_LENGTH,
6603                 &init_params);
6604
6605         if (aconnector->base.force == DRM_FORCE_ON) {
6606                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6607                                       aconnector->dc_link->local_sink :
6608                                       aconnector->dc_em_sink;
6609                 dc_sink_retain(aconnector->dc_sink);
6610         }
6611 }
6612
6613 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6614 {
6615         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6616
6617         /*
6618          * In case of a headless boot with force on for a DP managed connector,
6619          * these settings have to be != 0 to get an initial modeset.
6620          */
6621         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6622                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6623                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6624         }
6625
6626         create_eml_sink(aconnector);
6627 }
6628
6629 static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6630                                                 struct dc_stream_state *stream)
6631 {
6632         enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6633         struct dc_plane_state *dc_plane_state = NULL;
6634         struct dc_state *dc_state = NULL;
6635
6636         if (!stream)
6637                 goto cleanup;
6638
6639         dc_plane_state = dc_create_plane_state(dc);
6640         if (!dc_plane_state)
6641                 goto cleanup;
6642
6643         dc_state = dc_state_create(dc);
6644         if (!dc_state)
6645                 goto cleanup;
6646
6647         /* Populate a minimal full-screen plane matching the stream's source rect. */
6648         dc_plane_state->src_rect.height  = stream->src.height;
6649         dc_plane_state->src_rect.width   = stream->src.width;
6650         dc_plane_state->dst_rect.height  = stream->src.height;
6651         dc_plane_state->dst_rect.width   = stream->src.width;
6652         dc_plane_state->clip_rect.height = stream->src.height;
6653         dc_plane_state->clip_rect.width  = stream->src.width;
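        /* Pitch rounds up to a 256-pixel boundary; e.g. a 1920-wide source gets a 2048-pixel pitch. */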
6654         dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6655         dc_plane_state->plane_size.surface_size.height = stream->src.height;
6656         dc_plane_state->plane_size.surface_size.width  = stream->src.width;
6657         dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
6658         dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
6659         dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6660         dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6661         dc_plane_state->rotation = ROTATION_ANGLE_0;
6662         dc_plane_state->is_tiling_rotated = false;
6663         dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6664
6665         dc_result = dc_validate_stream(dc, stream);
6666         if (dc_result == DC_OK)
6667                 dc_result = dc_validate_plane(dc, dc_plane_state);
6668
6669         if (dc_result == DC_OK)
6670                 dc_result = dc_state_add_stream(dc, dc_state, stream);
6671
6672         if (dc_result == DC_OK && !dc_state_add_plane(
6673                                                 dc,
6674                                                 stream,
6675                                                 dc_plane_state,
6676                                                 dc_state))
6677                 dc_result = DC_FAIL_ATTACH_SURFACES;
6678
6679         if (dc_result == DC_OK)
6680                 dc_result = dc_validate_global_state(dc, dc_state, true);
6681
6682 cleanup:
6683         if (dc_state)
6684                 dc_state_release(dc_state);
6685
6686         if (dc_plane_state)
6687                 dc_plane_state_release(dc_plane_state);
6688
6689         return dc_result;
6690 }
6691
6692 struct dc_stream_state *
6693 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6694                                 const struct drm_display_mode *drm_mode,
6695                                 const struct dm_connector_state *dm_state,
6696                                 const struct dc_stream_state *old_stream)
6697 {
6698         struct drm_connector *connector = &aconnector->base;
6699         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6700         struct dc_stream_state *stream;
6701         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6702         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6703         enum dc_status dc_result = DC_OK;
6704
6705         do {
6706                 stream = create_stream_for_sink(connector, drm_mode,
6707                                                 dm_state, old_stream,
6708                                                 requested_bpc);
6709                 if (stream == NULL) {
6710                         DRM_ERROR("Failed to create stream for sink!\n");
6711                         break;
6712                 }
6713
6714                 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
6715                         return stream;
6716
6717                 dc_result = dc_validate_stream(adev->dm.dc, stream);
6718                 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6719                         dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6720
6721                 if (dc_result == DC_OK)
6722                         dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6723
6724                 if (dc_result != DC_OK) {
6725                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6726                                       drm_mode->hdisplay,
6727                                       drm_mode->vdisplay,
6728                                       drm_mode->clock,
6729                                       dc_result,
6730                                       dc_status_to_str(dc_result));
6731
6732                         dc_stream_release(stream);
6733                         stream = NULL;
6734                         requested_bpc -= 2; /* lower bpc to retry validation */
6735                 }
6736
6737         } while (stream == NULL && requested_bpc >= 6);
6738
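        /*
         * One-level retry only: force_yuv420_output stays set for the duration
         * of the recursive call, so this branch cannot be re-entered.
         */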
6739         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6740                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6741
6742                 aconnector->force_yuv420_output = true;
6743                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6744                                                 dm_state, old_stream);
6745                 aconnector->force_yuv420_output = false;
6746         }
6747
6748         return stream;
6749 }
6750
6751 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6752                                    struct drm_display_mode *mode)
6753 {
6754         enum drm_mode_status result = MODE_ERROR;
6755         struct dc_sink *dc_sink;
6756         /* TODO: Unhardcode stream count */
6757         struct dc_stream_state *stream;
6758         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6759
6760         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6761                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6762                 return result;
6763
6764         /*
6765          * Only run this the first time mode_valid is called, to initialize
6766          * EDID management.
6767          */
6768         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6769                 !aconnector->dc_em_sink)
6770                 handle_edid_mgmt(aconnector);
6771
6772         dc_sink = aconnector->dc_sink;
6773
6774         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6775                                 aconnector->base.force != DRM_FORCE_ON) {
6776                 DRM_ERROR("dc_sink is NULL!\n");
6777                 goto fail;
6778         }
6779
6780         drm_mode_set_crtcinfo(mode, 0);
6781
6782         stream = create_validate_stream_for_sink(aconnector, mode,
6783                                                  to_dm_connector_state(connector->state),
6784                                                  NULL);
6785         if (stream) {
6786                 dc_stream_release(stream);
6787                 result = MODE_OK;
6788         }
6789
6790 fail:
6791         /* TODO: error handling */
6792         return result;
6793 }
6794
6795 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6796                                 struct dc_info_packet *out)
6797 {
6798         struct hdmi_drm_infoframe frame;
6799         unsigned char buf[30]; /* 26 + 4 */
6800         ssize_t len;
6801         int ret, i;
6802
6803         memset(out, 0, sizeof(*out));
6804
6805         if (!state->hdr_output_metadata)
6806                 return 0;
6807
6808         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6809         if (ret)
6810                 return ret;
6811
6812         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6813         if (len < 0)
6814                 return (int)len;
6815
6816         /* Static metadata is a fixed 26 bytes + 4 byte header. */
6817         if (len != 30)
6818                 return -EINVAL;
6819
6820         /* Prepare the infopacket for DC. */
6821         switch (state->connector->connector_type) {
6822         case DRM_MODE_CONNECTOR_HDMIA:
6823                 out->hb0 = 0x87; /* type */
6824                 out->hb1 = 0x01; /* version */
6825                 out->hb2 = 0x1A; /* length */
6826                 out->sb[0] = buf[3]; /* checksum */
6827                 i = 1;
6828                 break;
6829
6830         case DRM_MODE_CONNECTOR_DisplayPort:
6831         case DRM_MODE_CONNECTOR_eDP:
6832                 out->hb0 = 0x00; /* sdp id, zero */
6833                 out->hb1 = 0x87; /* type */
6834                 out->hb2 = 0x1D; /* payload len - 1 */
6835                 out->hb3 = (0x13 << 2); /* sdp version */
6836                 out->sb[0] = 0x01; /* version */
6837                 out->sb[1] = 0x1A; /* length */
6838                 i = 2;
6839                 break;
6840
6841         default:
6842                 return -EINVAL;
6843         }
6844
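        /*
         * buf[0..2] holds the packed infoframe header and buf[3] its checksum;
         * the remaining 26 bytes are the Dynamic Range and Mastering payload.
         */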
6845         memcpy(&out->sb[i], &buf[4], 26);
6846         out->valid = true;
6847
6848         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6849                        sizeof(out->sb), false);
6850
6851         return 0;
6852 }
6853
6854 static int
6855 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6856                                  struct drm_atomic_state *state)
6857 {
6858         struct drm_connector_state *new_con_state =
6859                 drm_atomic_get_new_connector_state(state, conn);
6860         struct drm_connector_state *old_con_state =
6861                 drm_atomic_get_old_connector_state(state, conn);
6862         struct drm_crtc *crtc = new_con_state->crtc;
6863         struct drm_crtc_state *new_crtc_state;
6864         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
6865         int ret;
6866
6867         trace_amdgpu_dm_connector_atomic_check(new_con_state);
6868
6869         if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6870                 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6871                 if (ret < 0)
6872                         return ret;
6873         }
6874
6875         if (!crtc)
6876                 return 0;
6877
6878         if (new_con_state->colorspace != old_con_state->colorspace) {
6879                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6880                 if (IS_ERR(new_crtc_state))
6881                         return PTR_ERR(new_crtc_state);
6882
6883                 new_crtc_state->mode_changed = true;
6884         }
6885
6886         if (new_con_state->content_type != old_con_state->content_type) {
6887                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6888                 if (IS_ERR(new_crtc_state))
6889                         return PTR_ERR(new_crtc_state);
6890
6891                 new_crtc_state->mode_changed = true;
6892         }
6893
6894         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6895                 struct dc_info_packet hdr_infopacket;
6896
6897                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6898                 if (ret)
6899                         return ret;
6900
6901                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6902                 if (IS_ERR(new_crtc_state))
6903                         return PTR_ERR(new_crtc_state);
6904
6905                 /*
6906                  * DC considers the stream backends changed if the
6907                  * static metadata changes. Forcing the modeset also
6908                  * gives a simple way for userspace to switch from
6909                  * 8bpc to 10bpc when setting the metadata to enter
6910                  * or exit HDR.
6911                  *
6912                  * Changing the static metadata after it's been
6913                  * set is permissible, however. So only force a
6914                  * modeset if we're entering or exiting HDR.
6915                  */
6916                 new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
6917                         !old_con_state->hdr_output_metadata ||
6918                         !new_con_state->hdr_output_metadata;
6919         }
6920
6921         return 0;
6922 }
6923
6924 static const struct drm_connector_helper_funcs
6925 amdgpu_dm_connector_helper_funcs = {
6926         /*
6927          * If a second, bigger display is hotplugged in fbcon mode, its higher
6928          * resolution modes are filtered out by drm_mode_validate_size() and go
6929          * missing after the user starts lightdm. So the modes list must be
6930          * rebuilt in the get_modes callback, not just returned as a count.
6931          */
6932         .get_modes = get_modes,
6933         .mode_valid = amdgpu_dm_connector_mode_valid,
6934         .atomic_check = amdgpu_dm_connector_atomic_check,
6935 };
6936
6937 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6938 {
6940 }
6941
6942 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6943 {
6944         switch (display_color_depth) {
6945         case COLOR_DEPTH_666:
6946                 return 6;
6947         case COLOR_DEPTH_888:
6948                 return 8;
6949         case COLOR_DEPTH_101010:
6950                 return 10;
6951         case COLOR_DEPTH_121212:
6952                 return 12;
6953         case COLOR_DEPTH_141414:
6954                 return 14;
6955         case COLOR_DEPTH_161616:
6956                 return 16;
6957         default:
6958                 break;
6959         }
6960         return 0;
6961 }
6962
6963 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6964                                           struct drm_crtc_state *crtc_state,
6965                                           struct drm_connector_state *conn_state)
6966 {
6967         struct drm_atomic_state *state = crtc_state->state;
6968         struct drm_connector *connector = conn_state->connector;
6969         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6970         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6971         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6972         struct drm_dp_mst_topology_mgr *mst_mgr;
6973         struct drm_dp_mst_port *mst_port;
6974         struct drm_dp_mst_topology_state *mst_state;
6975         enum dc_color_depth color_depth;
6976         int clock, bpp = 0;
6977         bool is_y420 = false;
6978
6979         if (!aconnector->mst_output_port)
6980                 return 0;
6981
6982         mst_port = aconnector->mst_output_port;
6983         mst_mgr = &aconnector->mst_root->mst_mgr;
6984
6985         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6986                 return 0;
6987
6988         mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6989         if (IS_ERR(mst_state))
6990                 return PTR_ERR(mst_state);
6991
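        /* pbn_div is a 20.12 fixed-point value; dfixed_const() converts the integer divider. */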
6992         mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
6993
6994         if (!state->duplicated) {
6995                 int max_bpc = conn_state->max_requested_bpc;
6996
6997                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6998                           aconnector->force_yuv420_output;
6999                 color_depth = convert_color_depth_from_display_info(connector,
7000                                                                     is_y420,
7001                                                                     max_bpc);
7002                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7003                 clock = adjusted_mode->clock;
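                /* bpp is passed in 1/16-bpp units, matching drm_dp_calc_pbn_mode()'s fractional-bpp interface. */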
7004                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
7005         }
7006
7007         dm_new_connector_state->vcpi_slots =
7008                 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
7009                                               dm_new_connector_state->pbn);
7010         if (dm_new_connector_state->vcpi_slots < 0) {
7011                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7012                 return dm_new_connector_state->vcpi_slots;
7013         }
7014         return 0;
7015 }
7016
7017 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7018         .disable = dm_encoder_helper_disable,
7019         .atomic_check = dm_encoder_helper_atomic_check
7020 };
7021
7022 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7023                                             struct dc_state *dc_state,
7024                                             struct dsc_mst_fairness_vars *vars)
7025 {
7026         struct dc_stream_state *stream = NULL;
7027         struct drm_connector *connector;
7028         struct drm_connector_state *new_con_state;
7029         struct amdgpu_dm_connector *aconnector;
7030         struct dm_connector_state *dm_conn_state;
7031         int i, j, ret;
7032         int vcpi, pbn_div, pbn, slot_num = 0;
7033
7034         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7035
7036                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
7037                         continue;
7038
7039                 aconnector = to_amdgpu_dm_connector(connector);
7040
7041                 if (!aconnector->mst_output_port)
7042                         continue;
7043
7044                 if (!new_con_state || !new_con_state->crtc)
7045                         continue;
7046
7047                 dm_conn_state = to_dm_connector_state(new_con_state);
7048
7049                 for (j = 0; j < dc_state->stream_count; j++) {
7050                         stream = dc_state->streams[j];
7051                         if (!stream)
7052                                 continue;
7053
7054                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7055                                 break;
7056
7057                         stream = NULL;
7058                 }
7059
7060                 if (!stream)
7061                         continue;
7062
7063                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7064                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7065                 for (j = 0; j < dc_state->stream_count; j++) {
7066                         if (vars[j].aconnector == aconnector) {
7067                                 pbn = vars[j].pbn;
7068                                 break;
7069                         }
7070                 }
7071
7072                 if (j == dc_state->stream_count)
7073                         continue;
7074
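                /* A payload needs ceil(pbn / pbn_div) time slots on the MST link. */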
7075                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7076
7077                 if (stream->timing.flags.DSC != 1) {
7078                         dm_conn_state->pbn = pbn;
7079                         dm_conn_state->vcpi_slots = slot_num;
7080
7081                         ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
7082                                                            dm_conn_state->pbn, false);
7083                         if (ret < 0)
7084                                 return ret;
7085
7086                         continue;
7087                 }
7088
7089                 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
7090                 if (vcpi < 0)
7091                         return vcpi;
7092
7093                 dm_conn_state->pbn = pbn;
7094                 dm_conn_state->vcpi_slots = vcpi;
7095         }
7096         return 0;
7097 }
7098
7099 static int to_drm_connector_type(enum signal_type st)
7100 {
7101         switch (st) {
7102         case SIGNAL_TYPE_HDMI_TYPE_A:
7103                 return DRM_MODE_CONNECTOR_HDMIA;
7104         case SIGNAL_TYPE_EDP:
7105                 return DRM_MODE_CONNECTOR_eDP;
7106         case SIGNAL_TYPE_LVDS:
7107                 return DRM_MODE_CONNECTOR_LVDS;
7108         case SIGNAL_TYPE_RGB:
7109                 return DRM_MODE_CONNECTOR_VGA;
7110         case SIGNAL_TYPE_DISPLAY_PORT:
7111         case SIGNAL_TYPE_DISPLAY_PORT_MST:
7112                 return DRM_MODE_CONNECTOR_DisplayPort;
7113         case SIGNAL_TYPE_DVI_DUAL_LINK:
7114         case SIGNAL_TYPE_DVI_SINGLE_LINK:
7115                 return DRM_MODE_CONNECTOR_DVID;
7116         case SIGNAL_TYPE_VIRTUAL:
7117                 return DRM_MODE_CONNECTOR_VIRTUAL;
7118
7119         default:
7120                 return DRM_MODE_CONNECTOR_Unknown;
7121         }
7122 }
7123
7124 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7125 {
7126         struct drm_encoder *encoder;
7127
7128         /* There is only one encoder per connector */
7129         drm_connector_for_each_possible_encoder(connector, encoder)
7130                 return encoder;
7131
7132         return NULL;
7133 }
7134
7135 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7136 {
7137         struct drm_encoder *encoder;
7138         struct amdgpu_encoder *amdgpu_encoder;
7139
7140         encoder = amdgpu_dm_connector_to_encoder(connector);
7141
7142         if (encoder == NULL)
7143                 return;
7144
7145         amdgpu_encoder = to_amdgpu_encoder(encoder);
7146
7147         amdgpu_encoder->native_mode.clock = 0;
7148
7149         if (!list_empty(&connector->probed_modes)) {
7150                 struct drm_display_mode *preferred_mode = NULL;
7151
7152                 list_for_each_entry(preferred_mode,
7153                                     &connector->probed_modes,
7154                                     head) {
7155                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7156                                 amdgpu_encoder->native_mode = *preferred_mode;
7157
7158                         break;
7159                 }
7160
7161         }
7162 }
7163
7164 static struct drm_display_mode *
7165 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7166                              char *name,
7167                              int hdisplay, int vdisplay)
7168 {
7169         struct drm_device *dev = encoder->dev;
7170         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7171         struct drm_display_mode *mode = NULL;
7172         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7173
7174         mode = drm_mode_duplicate(dev, native_mode);
7175
7176         if (mode == NULL)
7177                 return NULL;
7178
7179         mode->hdisplay = hdisplay;
7180         mode->vdisplay = vdisplay;
7181         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7182         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7183
7184         return mode;
7186 }
7187
7188 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7189                                                  struct drm_connector *connector)
7190 {
7191         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7192         struct drm_display_mode *mode = NULL;
7193         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7194         struct amdgpu_dm_connector *amdgpu_dm_connector =
7195                                 to_amdgpu_dm_connector(connector);
7196         int i;
7197         int n;
7198         struct mode_size {
7199                 char name[DRM_DISPLAY_MODE_LEN];
7200                 int w;
7201                 int h;
7202         } common_modes[] = {
7203                 {  "640x480",  640,  480},
7204                 {  "800x600",  800,  600},
7205                 { "1024x768", 1024,  768},
7206                 { "1280x720", 1280,  720},
7207                 { "1280x800", 1280,  800},
7208                 {"1280x1024", 1280, 1024},
7209                 { "1440x900", 1440,  900},
7210                 {"1680x1050", 1680, 1050},
7211                 {"1600x1200", 1600, 1200},
7212                 {"1920x1080", 1920, 1080},
7213                 {"1920x1200", 1920, 1200}
7214         };
7215
7216         n = ARRAY_SIZE(common_modes);
7217
7218         for (i = 0; i < n; i++) {
7219                 struct drm_display_mode *curmode = NULL;
7220                 bool mode_existed = false;
7221
7222                 if (common_modes[i].w > native_mode->hdisplay ||
7223                     common_modes[i].h > native_mode->vdisplay ||
7224                    (common_modes[i].w == native_mode->hdisplay &&
7225                     common_modes[i].h == native_mode->vdisplay))
7226                         continue;
7227
7228                 list_for_each_entry(curmode, &connector->probed_modes, head) {
7229                         if (common_modes[i].w == curmode->hdisplay &&
7230                             common_modes[i].h == curmode->vdisplay) {
7231                                 mode_existed = true;
7232                                 break;
7233                         }
7234                 }
7235
7236                 if (mode_existed)
7237                         continue;
7238
7239                 mode = amdgpu_dm_create_common_mode(encoder,
7240                                 common_modes[i].name, common_modes[i].w,
7241                                 common_modes[i].h);
7242                 if (!mode)
7243                         continue;
7244
7245                 drm_mode_probed_add(connector, mode);
7246                 amdgpu_dm_connector->num_modes++;
7247         }
7248 }
7249
7250 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7251 {
7252         struct drm_encoder *encoder;
7253         struct amdgpu_encoder *amdgpu_encoder;
7254         const struct drm_display_mode *native_mode;
7255
7256         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7257             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7258                 return;
7259
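        /* The connector's probed_modes list is protected by mode_config.mutex. */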
7260         mutex_lock(&connector->dev->mode_config.mutex);
7261         amdgpu_dm_connector_get_modes(connector);
7262         mutex_unlock(&connector->dev->mode_config.mutex);
7263
7264         encoder = amdgpu_dm_connector_to_encoder(connector);
7265         if (!encoder)
7266                 return;
7267
7268         amdgpu_encoder = to_amdgpu_encoder(encoder);
7269
7270         native_mode = &amdgpu_encoder->native_mode;
7271         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7272                 return;
7273
7274         drm_connector_set_panel_orientation_with_quirk(connector,
7275                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7276                                                        native_mode->hdisplay,
7277                                                        native_mode->vdisplay);
7278 }
7279
7280 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7281                                               struct edid *edid)
7282 {
7283         struct amdgpu_dm_connector *amdgpu_dm_connector =
7284                         to_amdgpu_dm_connector(connector);
7285
7286         if (edid) {
7287                 /* empty probed_modes */
7288                 INIT_LIST_HEAD(&connector->probed_modes);
7289                 amdgpu_dm_connector->num_modes =
7290                                 drm_add_edid_modes(connector, edid);
7291
7292                 /* Sort the probed modes before calling
7293                  * amdgpu_dm_get_native_mode(), since an EDID can have
7294                  * more than one preferred mode. Modes later in the
7295                  * probed list may be preferred at a higher resolution;
7296                  * for example, 3840x2160 in the base EDID preferred
7297                  * timing and 4096x2160 as the preferred resolution in a
7298                  * DisplayID extension block later on.
7299                  */
7300                 drm_mode_sort(&connector->probed_modes);
7301                 amdgpu_dm_get_native_mode(connector);
7302
7303                 /* Freesync capabilities are reset by calling
7304                  * drm_add_edid_modes() and need to be
7305                  * restored here.
7306                  */
7307                 amdgpu_dm_update_freesync_caps(connector, edid);
7308         } else {
7309                 amdgpu_dm_connector->num_modes = 0;
7310         }
7311 }
7312
7313 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7314                               struct drm_display_mode *mode)
7315 {
7316         struct drm_display_mode *m;
7317
7318         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7319                 if (drm_mode_equal(m, mode))
7320                         return true;
7321         }
7322
7323         return false;
7324 }
7325
7326 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7327 {
7328         const struct drm_display_mode *m;
7329         struct drm_display_mode *new_mode;
7330         uint i;
7331         u32 new_modes_count = 0;
7332
7333         /* Standard FPS values
7334          *
7335          * 23.976       - TV/NTSC
7336          * 24           - Cinema
7337          * 25           - TV/PAL
7338          * 29.97        - TV/NTSC
7339          * 30           - TV/NTSC
7340          * 48           - Cinema HFR
7341          * 50           - TV/PAL
7342          * 60           - Commonly used
7343          * 48,72,96,120 - Multiples of 24
7344          */
7345         static const u32 common_rates[] = {
7346                 23976, 24000, 25000, 29970, 30000,
7347                 48000, 50000, 60000, 72000, 96000, 120000
7348         };
7349
7350         /*
7351          * Find mode with highest refresh rate with the same resolution
7352          * as the preferred mode. Some monitors report a preferred mode
7353          * whose refresh rate is lower than the highest rate they support.
7354          */
7355
7356         m = get_highest_refresh_rate_mode(aconnector, true);
7357         if (!m)
7358                 return 0;
7359
7360         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7361                 u64 target_vtotal, target_vtotal_diff;
7362                 u64 num, den;
7363
7364                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7365                         continue;
7366
7367                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7368                     common_rates[i] > aconnector->max_vfreq * 1000)
7369                         continue;
7370
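                /*
                 * vrefresh(mHz) = clock(kHz) * 1000 * 1000 / (htotal * vtotal);
                 * solve for the vtotal that hits the target rate, keeping the
                 * mode's clock and htotal fixed.
                 */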
7371                 num = (unsigned long long)m->clock * 1000 * 1000;
7372                 den = common_rates[i] * (unsigned long long)m->htotal;
7373                 target_vtotal = div_u64(num, den);
7374                 target_vtotal_diff = target_vtotal - m->vtotal;
7375
7376                 /* Check for illegal modes */
7377                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7378                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
7379                     m->vtotal + target_vtotal_diff < m->vsync_end)
7380                         continue;
7381
7382                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7383                 if (!new_mode)
7384                         goto out;
7385
7386                 new_mode->vtotal += (u16)target_vtotal_diff;
7387                 new_mode->vsync_start += (u16)target_vtotal_diff;
7388                 new_mode->vsync_end += (u16)target_vtotal_diff;
7389                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7390                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7391
7392                 if (!is_duplicate_mode(aconnector, new_mode)) {
7393                         drm_mode_probed_add(&aconnector->base, new_mode);
7394                         new_modes_count += 1;
7395                 } else
7396                         drm_mode_destroy(aconnector->base.dev, new_mode);
7397         }
7398  out:
7399         return new_modes_count;
7400 }
7401
7402 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7403                                                    struct edid *edid)
7404 {
7405         struct amdgpu_dm_connector *amdgpu_dm_connector =
7406                 to_amdgpu_dm_connector(connector);
7407
7408         if (!edid)
7409                 return;
7410
7411         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7412                 amdgpu_dm_connector->num_modes +=
7413                         add_fs_modes(amdgpu_dm_connector);
7414 }
7415
7416 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7417 {
7418         struct amdgpu_dm_connector *amdgpu_dm_connector =
7419                         to_amdgpu_dm_connector(connector);
7420         struct drm_encoder *encoder;
7421         struct edid *edid = amdgpu_dm_connector->edid;
7422         struct dc_link_settings *verified_link_cap =
7423                         &amdgpu_dm_connector->dc_link->verified_link_cap;
7424         const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
7425
7426         encoder = amdgpu_dm_connector_to_encoder(connector);
7427
7428         if (!drm_edid_is_valid(edid)) {
7429                 amdgpu_dm_connector->num_modes =
7430                                 drm_add_modes_noedid(connector, 640, 480);
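                /* 128b/132b channel coding implies a DP 2.x (UHBR) link, so offer a 1080p fallback as well. */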
7431                 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
7432                         amdgpu_dm_connector->num_modes +=
7433                                 drm_add_modes_noedid(connector, 1920, 1080);
7434         } else {
7435                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7436                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7437                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7438         }
7439         amdgpu_dm_fbc_init(connector);
7440
7441         return amdgpu_dm_connector->num_modes;
7442 }
7443
7444 static const u32 supported_colorspaces =
7445         BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
7446         BIT(DRM_MODE_COLORIMETRY_OPRGB) |
7447         BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
7448         BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
7449
7450 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7451                                      struct amdgpu_dm_connector *aconnector,
7452                                      int connector_type,
7453                                      struct dc_link *link,
7454                                      int link_index)
7455 {
7456         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7457
7458         /*
7459          * Some of the properties below require access to state, like bpc.
7460          * Allocate some default initial connector state with our reset helper.
7461          */
7462         if (aconnector->base.funcs->reset)
7463                 aconnector->base.funcs->reset(&aconnector->base);
7464
7465         aconnector->connector_id = link_index;
7466         aconnector->bl_idx = -1;
7467         aconnector->dc_link = link;
7468         aconnector->base.interlace_allowed = false;
7469         aconnector->base.doublescan_allowed = false;
7470         aconnector->base.stereo_allowed = false;
7471         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7472         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7473         aconnector->audio_inst = -1;
7474         aconnector->pack_sdp_v1_3 = false;
7475         aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
7476         memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
7477         mutex_init(&aconnector->hpd_lock);
7478         mutex_init(&aconnector->handle_mst_msg_ready);
7479
7480         /*
7481          * Configure HPD hot-plug support. connector->polled defaults to 0,
7482          * which means HPD hot plug is not supported.
7483          */
7484         switch (connector_type) {
7485         case DRM_MODE_CONNECTOR_HDMIA:
7486                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7487                 aconnector->base.ycbcr_420_allowed =
7488                         link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7489                 break;
7490         case DRM_MODE_CONNECTOR_DisplayPort:
7491                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7492                 link->link_enc = link_enc_cfg_get_link_enc(link);
7493                 ASSERT(link->link_enc);
7494                 if (link->link_enc)
7495                         aconnector->base.ycbcr_420_allowed =
7496                         link->link_enc->features.dp_ycbcr420_supported ? true : false;
7497                 break;
7498         case DRM_MODE_CONNECTOR_DVID:
7499                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7500                 break;
7501         default:
7502                 break;
7503         }
7504
7505         drm_object_attach_property(&aconnector->base.base,
7506                                 dm->ddev->mode_config.scaling_mode_property,
7507                                 DRM_MODE_SCALE_NONE);
7508
7509         drm_object_attach_property(&aconnector->base.base,
7510                                 adev->mode_info.underscan_property,
7511                                 UNDERSCAN_OFF);
7512         drm_object_attach_property(&aconnector->base.base,
7513                                 adev->mode_info.underscan_hborder_property,
7514                                 0);
7515         drm_object_attach_property(&aconnector->base.base,
7516                                 adev->mode_info.underscan_vborder_property,
7517                                 0);
7518
7519         if (!aconnector->mst_root)
7520                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7521
7522         aconnector->base.state->max_bpc = 16;
7523         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7524
7525         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7526             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7527                 drm_object_attach_property(&aconnector->base.base,
7528                                 adev->mode_info.abm_level_property, 0);
7529         }
7530
7531         if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
7532                 /* Content Type is currently only implemented for HDMI. */
7533                 drm_connector_attach_content_type_property(&aconnector->base);
7534         }
7535
7536         if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
7537                 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
7538                         drm_connector_attach_colorspace_property(&aconnector->base);
7539         } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
7540                    connector_type == DRM_MODE_CONNECTOR_eDP) {
7541                 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
7542                         drm_connector_attach_colorspace_property(&aconnector->base);
7543         }
7544
7545         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7546             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7547             connector_type == DRM_MODE_CONNECTOR_eDP) {
7548                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7549
7550                 if (!aconnector->mst_root)
7551                         drm_connector_attach_vrr_capable_property(&aconnector->base);
7552
7553                 if (adev->dm.hdcp_workqueue)
7554                         drm_connector_attach_content_protection_property(&aconnector->base, true);
7555         }
7556 }
7557
7558 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7559                               struct i2c_msg *msgs, int num)
7560 {
7561         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7562         struct ddc_service *ddc_service = i2c->ddc_service;
7563         struct i2c_command cmd;
7564         int i;
7565         int result = -EIO;
7566
7567         if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
7568                 return result;
7569
7570         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7571
7572         if (!cmd.payloads)
7573                 return result;
7574
7575         cmd.number_of_payloads = num;
7576         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7577         cmd.speed = 100; /* kHz */
7578
7579         for (i = 0; i < num; i++) {
7580                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7581                 cmd.payloads[i].address = msgs[i].addr;
7582                 cmd.payloads[i].length = msgs[i].len;
7583                 cmd.payloads[i].data = msgs[i].buf;
7584         }
7585
7586         if (dc_submit_i2c(
7587                         ddc_service->ctx->dc,
7588                         ddc_service->link->link_index,
7589                         &cmd))
7590                 result = num;
7591
7592         kfree(cmd.payloads);
7593         return result;
7594 }
7595
7596 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7597 {
7598         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7599 }
7600
7601 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7602         .master_xfer = amdgpu_dm_i2c_xfer,
7603         .functionality = amdgpu_dm_i2c_func,
7604 };
7605
7606 static struct amdgpu_i2c_adapter *
7607 create_i2c(struct ddc_service *ddc_service,
7608            int link_index,
7609            int *res)
7610 {
7611         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7612         struct amdgpu_i2c_adapter *i2c;
7613
7614         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7615         if (!i2c)
7616                 return NULL;
7617         i2c->base.owner = THIS_MODULE;
7618         i2c->base.dev.parent = &adev->pdev->dev;
7619         i2c->base.algo = &amdgpu_dm_i2c_algo;
7620         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7621         i2c_set_adapdata(&i2c->base, i2c);
7622         i2c->ddc_service = ddc_service;
7623
7624         return i2c;
7625 }
7626
7627
7628 /*
7629  * Note: this function assumes that dc_link_detect() was called for the
7630  * dc_link which will be represented by this aconnector.
7631  */
7632 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7633                                     struct amdgpu_dm_connector *aconnector,
7634                                     u32 link_index,
7635                                     struct amdgpu_encoder *aencoder)
7636 {
7637         int res = 0;
7638         int connector_type;
7639         struct dc *dc = dm->dc;
7640         struct dc_link *link = dc_get_link_at_index(dc, link_index);
7641         struct amdgpu_i2c_adapter *i2c;
7642
7643         /* Not needed for writeback connector */
7644         link->priv = aconnector;
7645
7647         i2c = create_i2c(link->ddc, link->link_index, &res);
7648         if (!i2c) {
7649                 DRM_ERROR("Failed to create i2c adapter data\n");
7650                 return -ENOMEM;
7651         }
7652
7653         aconnector->i2c = i2c;
7654         res = i2c_add_adapter(&i2c->base);
7655
7656         if (res) {
7657                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7658                 goto out_free;
7659         }
7660
7661         connector_type = to_drm_connector_type(link->connector_signal);
7662
7663         res = drm_connector_init_with_ddc(
7664                         dm->ddev,
7665                         &aconnector->base,
7666                         &amdgpu_dm_connector_funcs,
7667                         connector_type,
7668                         &i2c->base);
7669
7670         if (res) {
7671                 DRM_ERROR("connector_init failed\n");
7672                 aconnector->connector_id = -1;
7673                 goto out_free;
7674         }
7675
7676         drm_connector_helper_add(
7677                         &aconnector->base,
7678                         &amdgpu_dm_connector_helper_funcs);
7679
7680         amdgpu_dm_connector_init_helper(
7681                 dm,
7682                 aconnector,
7683                 connector_type,
7684                 link,
7685                 link_index);
7686
7687         drm_connector_attach_encoder(
7688                 &aconnector->base, &aencoder->base);
7689
7690         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7691                 || connector_type == DRM_MODE_CONNECTOR_eDP)
7692                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7693
7694 out_free:
7695         if (res) {
7696                 kfree(i2c);
7697                 aconnector->i2c = NULL;
7698         }
7699         return res;
7700 }
7701
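/* Build a possible_crtcs bitmask with one bit per CRTC, e.g. 4 CRTCs -> 0xf. */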
7702 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7703 {
7704         switch (adev->mode_info.num_crtc) {
7705         case 1:
7706                 return 0x1;
7707         case 2:
7708                 return 0x3;
7709         case 3:
7710                 return 0x7;
7711         case 4:
7712                 return 0xf;
7713         case 5:
7714                 return 0x1f;
7715         case 6:
7716         default:
7717                 return 0x3f;
7718         }
7719 }
7720
7721 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7722                                   struct amdgpu_encoder *aencoder,
7723                                   uint32_t link_index)
7724 {
7725         struct amdgpu_device *adev = drm_to_adev(dev);
7726
7727         int res = drm_encoder_init(dev,
7728                                    &aencoder->base,
7729                                    &amdgpu_dm_encoder_funcs,
7730                                    DRM_MODE_ENCODER_TMDS,
7731                                    NULL);
7732
7733         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7734
7735         if (!res)
7736                 aencoder->encoder_id = link_index;
7737         else
7738                 aencoder->encoder_id = -1;
7739
7740         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7741
7742         return res;
7743 }
7744
7745 static void manage_dm_interrupts(struct amdgpu_device *adev,
7746                                  struct amdgpu_crtc *acrtc,
7747                                  bool enable)
7748 {
7749         /*
7750          * We have no guarantee that the frontend index maps to the same
7751          * backend index - some even map to more than one.
7752          *
7753          * TODO: Use a different interrupt or check DC itself for the mapping.
7754          */
7755         int irq_type =
7756                 amdgpu_display_crtc_idx_to_irq_type(
7757                         adev,
7758                         acrtc->crtc_id);
7759
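        /* Enable vblank before taking IRQ references; teardown releases them in mirrored order. */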
7760         if (enable) {
7761                 drm_crtc_vblank_on(&acrtc->base);
7762                 amdgpu_irq_get(
7763                         adev,
7764                         &adev->pageflip_irq,
7765                         irq_type);
7766 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7767                 amdgpu_irq_get(
7768                         adev,
7769                         &adev->vline0_irq,
7770                         irq_type);
7771 #endif
7772         } else {
7773 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7774                 amdgpu_irq_put(
7775                         adev,
7776                         &adev->vline0_irq,
7777                         irq_type);
7778 #endif
7779                 amdgpu_irq_put(
7780                         adev,
7781                         &adev->pageflip_irq,
7782                         irq_type);
7783                 drm_crtc_vblank_off(&acrtc->base);
7784         }
7785 }
7786
7787 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7788                                       struct amdgpu_crtc *acrtc)
7789 {
7790         int irq_type =
7791                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7792
7793         /*
7794          * This reads the current state for the IRQ and forcibly reapplies
7795          * the setting to hardware.
7796          */
7797         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7798 }
7799
7800 static bool
7801 is_scaling_state_different(const struct dm_connector_state *dm_state,
7802                            const struct dm_connector_state *old_dm_state)
7803 {
7804         if (dm_state->scaling != old_dm_state->scaling)
7805                 return true;
7806         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7807                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7808                         return true;
7809         } else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7810                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7811                         return true;
7812         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7813                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7814                 return true;
7815         return false;
7816 }
7817
7818 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
7819                                             struct drm_crtc_state *old_crtc_state,
7820                                             struct drm_connector_state *new_conn_state,
7821                                             struct drm_connector_state *old_conn_state,
7822                                             const struct drm_connector *connector,
7823                                             struct hdcp_workqueue *hdcp_w)
7824 {
7825         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7826         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7827
7828         pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
7829                 connector->index, connector->status, connector->dpms);
7830         pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
7831                 old_conn_state->content_protection, new_conn_state->content_protection);
7832
7833         if (old_crtc_state)
7834                 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7835                 old_crtc_state->enable,
7836                 old_crtc_state->active,
7837                 old_crtc_state->mode_changed,
7838                 old_crtc_state->active_changed,
7839                 old_crtc_state->connectors_changed);
7840
7841         if (new_crtc_state)
7842                 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7843                 new_crtc_state->enable,
7844                 new_crtc_state->active,
7845                 new_crtc_state->mode_changed,
7846                 new_crtc_state->active_changed,
7847                 new_crtc_state->connectors_changed);
7848
7849         /* hdcp content type change */
7850         if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
7851             new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7852                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7853                 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
7854                 return true;
7855         }
7856
7857         /* CP is being re-enabled, ignore this */
7858         if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7859             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7860                 if (new_crtc_state && new_crtc_state->mode_changed) {
7861                         new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7862                         pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
7863                         return true;
7864                 }
7865                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7866                 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
7867                 return false;
7868         }
7869
7870         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7871          *
7872          * Handles:     UNDESIRED -> ENABLED
7873          */
7874         if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7875             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7876                 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7877
7878         /* Stream removed and re-enabled
7879          *
7880          * Can sometimes overlap with the HPD case,
7881          * thus set update_hdcp to false to avoid
7882          * setting HDCP multiple times.
7883          *
7884          * Handles:     DESIRED -> DESIRED (Special case)
7885          */
7886         if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
7887                 new_conn_state->crtc && new_conn_state->crtc->enabled &&
7888                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7889                 dm_con_state->update_hdcp = false;
7890                 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
7891                         __func__);
7892                 return true;
7893         }
7894
7895         /* Hot-plug, headless S3, DPMS
7896          *
7897          * Only start HDCP if the display is connected/enabled.
7898          * The update_hdcp flag will be set to false until the next
7899          * HPD comes in.
7900          *
7901          * Handles:     DESIRED -> DESIRED (Special case)
7902          */
7903         if (dm_con_state->update_hdcp &&
7904             new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7905             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7906                 dm_con_state->update_hdcp = false;
7907                 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
7908                         __func__);
7909                 return true;
7910         }
7911
7912         if (old_conn_state->content_protection == new_conn_state->content_protection) {
7913                 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7914                         if (new_crtc_state && new_crtc_state->mode_changed) {
7915                                 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
7916                                         __func__);
7917                                 return true;
7918                         }
7919                         pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
7920                                 __func__);
7921                         return false;
7922                 }
7923
7924                 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
7925                 return false;
7926         }
7927
7928         if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7929                 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
7930                         __func__);
7931                 return true;
7932         }
7933
7934         pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
7935         return false;
7936 }
7937
7938 static void remove_stream(struct amdgpu_device *adev,
7939                           struct amdgpu_crtc *acrtc,
7940                           struct dc_stream_state *stream)
7941 {
7942         /* This is the mode-update case; reset the CRTC bookkeeping for the removed stream. */
7943
7944         acrtc->otg_inst = -1;
7945         acrtc->enabled = false;
7946 }
7947
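/*
 * Latch the pending pageflip event on the CRTC so the pageflip interrupt
 * handler can deliver it. The caller must hold the device's event_lock.
 */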
7948 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7949 {
7950
7951         assert_spin_locked(&acrtc->base.dev->event_lock);
7952         WARN_ON(acrtc->event);
7953
7954         acrtc->event = acrtc->base.state->event;
7955
7956         /* Set the flip status */
7957         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7958
7959         /* Mark this event as consumed */
7960         acrtc->base.state->event = NULL;
7961
7962         drm_dbg_state(acrtc->base.dev,
7963                       "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7964                       acrtc->crtc_id);
7965 }
7966
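/*
 * Rebuild the VRR parameters and infopackets for a stream on flip and
 * mirror the results into the CRTC's IRQ parameters, all under the
 * device's event_lock so the vblank handlers see a consistent snapshot.
 */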
7967 static void update_freesync_state_on_stream(
7968         struct amdgpu_display_manager *dm,
7969         struct dm_crtc_state *new_crtc_state,
7970         struct dc_stream_state *new_stream,
7971         struct dc_plane_state *surface,
7972         u32 flip_timestamp_in_us)
7973 {
7974         struct mod_vrr_params vrr_params;
7975         struct dc_info_packet vrr_infopacket = {0};
7976         struct amdgpu_device *adev = dm->adev;
7977         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7978         unsigned long flags;
7979         bool pack_sdp_v1_3 = false;
7980         struct amdgpu_dm_connector *aconn;
7981         enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
7982
7983         if (!new_stream)
7984                 return;
7985
7986         /*
7987          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7988          * For now it's sufficient to just guard against these conditions.
7989          */
7990
7991         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7992                 return;
7993
7994         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7995         vrr_params = acrtc->dm_irq_params.vrr_params;
7996
7997         if (surface) {
7998                 mod_freesync_handle_preflip(
7999                         dm->freesync_module,
8000                         surface,
8001                         new_stream,
8002                         flip_timestamp_in_us,
8003                         &vrr_params);
8004
8005                 if (adev->family < AMDGPU_FAMILY_AI &&
8006                     amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
8007                         mod_freesync_handle_v_update(dm->freesync_module,
8008                                                      new_stream, &vrr_params);
8009
8010                         /* Need to call this before the frame ends. */
8011                         dc_stream_adjust_vmin_vmax(dm->dc,
8012                                                    new_crtc_state->stream,
8013                                                    &vrr_params.adjust);
8014                 }
8015         }
8016
8017         aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
8018
8019         if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
8020                 pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
8021
8022                 if (aconn->vsdb_info.amd_vsdb_version == 1)
8023                         packet_type = PACKET_TYPE_FS_V1;
8024                 else if (aconn->vsdb_info.amd_vsdb_version == 2)
8025                         packet_type = PACKET_TYPE_FS_V2;
8026                 else if (aconn->vsdb_info.amd_vsdb_version == 3)
8027                         packet_type = PACKET_TYPE_FS_V3;
8028
8029                 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
8030                                         &new_stream->adaptive_sync_infopacket);
8031         }
8032
8033         mod_freesync_build_vrr_infopacket(
8034                 dm->freesync_module,
8035                 new_stream,
8036                 &vrr_params,
8037                 packet_type,
8038                 TRANSFER_FUNC_UNKNOWN,
8039                 &vrr_infopacket,
8040                 pack_sdp_v1_3);
8041
8042         new_crtc_state->freesync_vrr_info_changed |=
8043                 (memcmp(&new_crtc_state->vrr_infopacket,
8044                         &vrr_infopacket,
8045                         sizeof(vrr_infopacket)) != 0);
8046
8047         acrtc->dm_irq_params.vrr_params = vrr_params;
8048         new_crtc_state->vrr_infopacket = vrr_infopacket;
8049
8050         new_stream->vrr_infopacket = vrr_infopacket;
8051         new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
8052
8053         if (new_crtc_state->freesync_vrr_info_changed)
8054                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8055                               new_crtc_state->base.crtc->base.id,
8056                               (int)new_crtc_state->base.vrr_enabled,
8057                               (int)vrr_params.state);
8058
8059         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8060 }
8061
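/*
 * Derive the effective VRR state for the new CRTC state and copy the
 * resulting freesync configuration into the CRTC's IRQ parameters for
 * use by the DM interrupt handlers.
 */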
8062 static void update_stream_irq_parameters(
8063         struct amdgpu_display_manager *dm,
8064         struct dm_crtc_state *new_crtc_state)
8065 {
8066         struct dc_stream_state *new_stream = new_crtc_state->stream;
8067         struct mod_vrr_params vrr_params;
8068         struct mod_freesync_config config = new_crtc_state->freesync_config;
8069         struct amdgpu_device *adev = dm->adev;
8070         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8071         unsigned long flags;
8072
8073         if (!new_stream)
8074                 return;
8075
8076         /*
8077          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8078          * For now it's sufficient to just guard against these conditions.
8079          */
8080         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8081                 return;
8082
8083         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8084         vrr_params = acrtc->dm_irq_params.vrr_params;
8085
8086         if (new_crtc_state->vrr_supported &&
8087             config.min_refresh_in_uhz &&
8088             config.max_refresh_in_uhz) {
8089                 /*
8090                  * if freesync compatible mode was set, config.state will be set
8091                  * in atomic check
8092                  */
8093                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8094                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8095                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8096                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8097                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8098                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8099                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8100                 } else {
8101                         config.state = new_crtc_state->base.vrr_enabled ?
8102                                                      VRR_STATE_ACTIVE_VARIABLE :
8103                                                      VRR_STATE_INACTIVE;
8104                 }
8105         } else {
8106                 config.state = VRR_STATE_UNSUPPORTED;
8107         }
8108
8109         mod_freesync_build_vrr_params(dm->freesync_module,
8110                                       new_stream,
8111                                       &config, &vrr_params);
8112
8113         new_crtc_state->freesync_config = config;
8114         /* Copy state for access from DM IRQ handler */
8115         acrtc->dm_irq_params.freesync_config = config;
8116         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8117         acrtc->dm_irq_params.vrr_params = vrr_params;
8118         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8119 }
8120
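/*
 * Hold a vblank reference and keep the vupdate interrupt enabled while
 * VRR is active on the CRTC; release both again on the active -> inactive
 * transition.
 */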
8121 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8122                                             struct dm_crtc_state *new_state)
8123 {
8124         bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
8125         bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
8126
8127         if (!old_vrr_active && new_vrr_active) {
8128                 /* Transition VRR inactive -> active:
8129                  * While VRR is active, we must not disable vblank irq, as a
8130                  * re-enable after a disable could compute bogus vblank/pflip
8131                  * timestamps if it happened inside the display front-porch.
8132                  *
8133                  * We also need vupdate irq for the actual core vblank handling
8134                  * at end of vblank.
8135                  */
8136                 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
8137                 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
8138                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8139                                  __func__, new_state->base.crtc->base.id);
8140         } else if (old_vrr_active && !new_vrr_active) {
8141                 /* Transition VRR active -> inactive:
8142                  * Allow vblank irq disable again for fixed refresh rate.
8143                  */
8144                 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
8145                 drm_crtc_vblank_put(new_state->base.crtc);
8146                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8147                                  __func__, new_state->base.crtc->base.id);
8148         }
8149 }
8150
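/* Flush a cursor update for every cursor plane in the atomic state. */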
8151 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8152 {
8153         struct drm_plane *plane;
8154         struct drm_plane_state *old_plane_state;
8155         int i;
8156
8157         /*
8158          * TODO: Make this per-stream so we don't issue redundant updates for
8159          * commits with multiple streams.
8160          */
8161         for_each_old_plane_in_state(state, plane, old_plane_state, i)
8162                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8163                         amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
8164 }
8165
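/* Return the TTM memory type backing the framebuffer's BO, or 0 if unbound. */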
8166 static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
8167 {
8168         struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
8169
8170         return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
8171 }
8172
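/*
 * Program all plane updates for one CRTC as a single DC update: build a
 * bundle of surface and stream updates, throttle page flips against the
 * target vblank, and handle the related freesync and PSR bookkeeping.
 */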
8173 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8174                                     struct drm_device *dev,
8175                                     struct amdgpu_display_manager *dm,
8176                                     struct drm_crtc *pcrtc,
8177                                     bool wait_for_vblank)
8178 {
8179         u32 i;
8180         u64 timestamp_ns = ktime_get_ns();
8181         struct drm_plane *plane;
8182         struct drm_plane_state *old_plane_state, *new_plane_state;
8183         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8184         struct drm_crtc_state *new_pcrtc_state =
8185                         drm_atomic_get_new_crtc_state(state, pcrtc);
8186         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8187         struct dm_crtc_state *dm_old_crtc_state =
8188                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8189         int planes_count = 0, vpos, hpos;
8190         unsigned long flags;
8191         u32 target_vblank, last_flip_vblank;
8192         bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
8193         bool cursor_update = false;
8194         bool pflip_present = false;
8195         bool dirty_rects_changed = false;
8196         struct {
8197                 struct dc_surface_update surface_updates[MAX_SURFACES];
8198                 struct dc_plane_info plane_infos[MAX_SURFACES];
8199                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8200                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8201                 struct dc_stream_update stream_update;
8202         } *bundle;
8203
8204         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8205
8206         if (!bundle) {
8207                 drm_err(dev, "Failed to allocate update bundle\n");
8208                 goto cleanup;
8209         }
8210
8211         /*
8212          * Disable the cursor first if we're disabling all the planes.
8213          * It'll remain on the screen after the planes are re-enabled
8214          * if we don't.
8215          */
8216         if (acrtc_state->active_planes == 0)
8217                 amdgpu_dm_commit_cursors(state);
8218
8219         /* update planes when needed */
8220         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8221                 struct drm_crtc *crtc = new_plane_state->crtc;
8222                 struct drm_crtc_state *new_crtc_state;
8223                 struct drm_framebuffer *fb = new_plane_state->fb;
8224                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8225                 bool plane_needs_flip;
8226                 struct dc_plane_state *dc_plane;
8227                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8228
8229                 /* Cursor plane is handled after stream updates */
8230                 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8231                         if ((fb && crtc == pcrtc) ||
8232                             (old_plane_state->fb && old_plane_state->crtc == pcrtc))
8233                                 cursor_update = true;
8234
8235                         continue;
8236                 }
8237
8238                 if (!fb || !crtc || pcrtc != crtc)
8239                         continue;
8240
8241                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8242                 if (!new_crtc_state->active)
8243                         continue;
8244
8245                 dc_plane = dm_new_plane_state->dc_state;
8246                 if (!dc_plane)
8247                         continue;
8248
8249                 bundle->surface_updates[planes_count].surface = dc_plane;
8250                 if (new_pcrtc_state->color_mgmt_changed) {
8251                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8252                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8253                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8254                         bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
8255                         bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func;
8256                         bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func;
8257                         bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf;
8258                 }
8259
8260                 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
8261                                      &bundle->scaling_infos[planes_count]);
8262
8263                 bundle->surface_updates[planes_count].scaling_info =
8264                         &bundle->scaling_infos[planes_count];
8265
8266                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8267
8268                 pflip_present = pflip_present || plane_needs_flip;
8269
8270                 if (!plane_needs_flip) {
8271                         planes_count += 1;
8272                         continue;
8273                 }
8274
8275                 fill_dc_plane_info_and_addr(
8276                         dm->adev, new_plane_state,
8277                         afb->tiling_flags,
8278                         &bundle->plane_infos[planes_count],
8279                         &bundle->flip_addrs[planes_count].address,
8280                         afb->tmz_surface, false);
8281
8282                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
8283                                  new_plane_state->plane->index,
8284                                  bundle->plane_infos[planes_count].dcc.enable);
8285
8286                 bundle->surface_updates[planes_count].plane_info =
8287                         &bundle->plane_infos[planes_count];
8288
8289                 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
8290                     acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
8291                         fill_dc_dirty_rects(plane, old_plane_state,
8292                                             new_plane_state, new_crtc_state,
8293                                             &bundle->flip_addrs[planes_count],
8294                                             &dirty_rects_changed);
8295
8296                         /*
8297                          * If the dirty regions changed, PSR-SU needs to be disabled temporarily
8298                          * and re-enabled once the dirty regions are stable, to avoid a video
8299                          * glitch. PSR-SU will be re-enabled in vblank_control_worker() if the
8300                          * user pauses the video while PSR-SU is disabled.
8301                          */
8302                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8303                             acrtc_attach->dm_irq_params.allow_psr_entry &&
8304 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8305                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8306 #endif
8307                             dirty_rects_changed) {
8308                                 mutex_lock(&dm->dc_lock);
8309                                 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
8310                                         timestamp_ns;
8311                                 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
8312                                         amdgpu_dm_psr_disable(acrtc_state->stream);
8313                                 mutex_unlock(&dm->dc_lock);
8314                         }
8315                 }
8316
8317                 /*
8318                  * Only allow immediate flips for fast updates that don't
8319                  * change memory domain, FB pitch, DCC state, rotation or
8320                  * mirroring.
8321                  *
8322                  * dm_crtc_helper_atomic_check() only accepts async flips with
8323                  * fast updates.
8324                  */
8325                 if (crtc->state->async_flip &&
8326                     (acrtc_state->update_type != UPDATE_TYPE_FAST ||
8327                      get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
8328                         drm_warn_once(state->dev,
8329                                       "[PLANE:%d:%s] async flip with non-fast update\n",
8330                                       plane->base.id, plane->name);
8331
8332                 bundle->flip_addrs[planes_count].flip_immediate =
8333                         crtc->state->async_flip &&
8334                         acrtc_state->update_type == UPDATE_TYPE_FAST &&
8335                         get_mem_type(old_plane_state->fb) == get_mem_type(fb);
8336
8337                 timestamp_ns = ktime_get_ns();
8338                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8339                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8340                 bundle->surface_updates[planes_count].surface = dc_plane;
8341
8342                 if (!bundle->surface_updates[planes_count].surface) {
8343                         DRM_ERROR("No surface for CRTC: id=%d\n",
8344                                         acrtc_attach->crtc_id);
8345                         continue;
8346                 }
8347
8348                 if (plane == pcrtc->primary)
8349                         update_freesync_state_on_stream(
8350                                 dm,
8351                                 acrtc_state,
8352                                 acrtc_state->stream,
8353                                 dc_plane,
8354                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8355
8356                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
8357                                  __func__,
8358                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8359                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8360
8361                 planes_count += 1;
8362
8363         }
8364
8365         if (pflip_present) {
8366                 if (!vrr_active) {
8367                         /* Use old throttling in non-vrr fixed refresh rate mode
8368                          * to keep flip scheduling based on target vblank counts
8369                          * working in a backwards compatible way, e.g., for
8370                          * clients using the GLX_OML_sync_control extension or
8371                          * DRI3/Present extension with defined target_msc.
8372                          */
8373                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8374                 } else {
8375                         /* For variable refresh rate mode only:
8376                          * Get vblank of last completed flip to avoid > 1 vrr
8377                          * flips per video frame by use of throttling, but allow
8378                          * flip programming anywhere in the possibly large
8379                          * variable vrr vblank interval for fine-grained flip
8380                          * timing control and more opportunity to avoid stutter
8381                          * on late submission of flips.
8382                          */
8383                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8384                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8385                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8386                 }
8387
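                /*
                 * Flip no earlier than wait_for_vblank (0 or 1) vblanks
                 * after the last completed flip.
                 */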
8388                 target_vblank = last_flip_vblank + wait_for_vblank;
8389
8390                 /*
8391                  * Wait until we're out of the vertical blank period before the one
8392                  * targeted by the flip
8393                  */
8394                 while ((acrtc_attach->enabled &&
8395                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8396                                                             0, &vpos, &hpos, NULL,
8397                                                             NULL, &pcrtc->hwmode)
8398                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8399                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8400                         (int)(target_vblank -
8401                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8402                         usleep_range(1000, 1100);
8403                 }
8404
8405                 /*
8406                  * Prepare the flip event for the pageflip interrupt to handle.
8407                  *
8408                  * This only works when the appropriate hardware blocks (e.g. HUBP)
8409                  * have already been turned on, so in the transition from 0 -> n
8410                  * planes we have to skip the hardware-generated event and rely on
8411                  * sending it from software.
8412                  */
8413                 if (acrtc_attach->base.state->event &&
8414                     acrtc_state->active_planes > 0) {
8415                         drm_crtc_vblank_get(pcrtc);
8416
8417                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8418
8419                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8420                         prepare_flip_isr(acrtc_attach);
8421
8422                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8423                 }
8424
8425                 if (acrtc_state->stream) {
8426                         if (acrtc_state->freesync_vrr_info_changed)
8427                                 bundle->stream_update.vrr_infopacket =
8428                                         &acrtc_state->stream->vrr_infopacket;
8429                 }
8430         } else if (cursor_update && acrtc_state->active_planes > 0 &&
8431                    acrtc_attach->base.state->event) {
8432                 drm_crtc_vblank_get(pcrtc);
8433
8434                 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8435
8436                 acrtc_attach->event = acrtc_attach->base.state->event;
8437                 acrtc_attach->base.state->event = NULL;
8438
8439                 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8440         }
8441
8442         /* Update the planes if changed or disable if we don't have any. */
8443         if ((planes_count || acrtc_state->active_planes == 0) &&
8444                 acrtc_state->stream) {
8445                 /*
8446                  * If PSR or idle optimizations are enabled then flush out
8447                  * any pending work before hardware programming.
8448                  */
8449                 if (dm->vblank_control_workqueue)
8450                         flush_workqueue(dm->vblank_control_workqueue);
8451
8452                 bundle->stream_update.stream = acrtc_state->stream;
8453                 if (new_pcrtc_state->mode_changed) {
8454                         bundle->stream_update.src = acrtc_state->stream->src;
8455                         bundle->stream_update.dst = acrtc_state->stream->dst;
8456                 }
8457
8458                 if (new_pcrtc_state->color_mgmt_changed) {
8459                         /*
8460                          * TODO: This isn't fully correct since we've actually
8461                          * already modified the stream in place.
8462                          */
8463                         bundle->stream_update.gamut_remap =
8464                                 &acrtc_state->stream->gamut_remap_matrix;
8465                         bundle->stream_update.output_csc_transform =
8466                                 &acrtc_state->stream->csc_color_matrix;
8467                         bundle->stream_update.out_transfer_func =
8468                                 acrtc_state->stream->out_transfer_func;
8469                         bundle->stream_update.lut3d_func =
8470                                 (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
8471                         bundle->stream_update.func_shaper =
8472                                 (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
8473                 }
8474
8475                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8476                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8477                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
8478
8479                 mutex_lock(&dm->dc_lock);
8480                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8481                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
8482                         amdgpu_dm_psr_disable(acrtc_state->stream);
8483                 mutex_unlock(&dm->dc_lock);
8484
8485                 /*
8486                  * If FreeSync state on the stream has changed then we need to
8487                  * re-adjust the min/max bounds now that DC doesn't handle this
8488                  * as part of commit.
8489                  */
8490                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8491                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8492                         dc_stream_adjust_vmin_vmax(
8493                                 dm->dc, acrtc_state->stream,
8494                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8495                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8496                 }
8497                 mutex_lock(&dm->dc_lock);
8498                 update_planes_and_stream_adapter(dm->dc,
8499                                          acrtc_state->update_type,
8500                                          planes_count,
8501                                          acrtc_state->stream,
8502                                          &bundle->stream_update,
8503                                          bundle->surface_updates);
8504
8505                 /*
8506                  * Enable or disable the interrupts on the backend.
8507                  *
8508                  * Most pipes are put into power gating when unused.
8509                  *
8510                  * When power gating is enabled on a pipe we lose the
8511                  * interrupt enablement state when power gating is disabled.
8512                  *
8513                  * So we need to update the IRQ control state in hardware
8514                  * whenever the pipe turns on (since it could be previously
8515                  * power gated) or off (since some pipes can't be power gated
8516                  * on some ASICs).
8517                  */
8518                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8519                         dm_update_pflip_irq_state(drm_to_adev(dev),
8520                                                   acrtc_attach);
8521
8522                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8523                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8524                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8525                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
8526
8527                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8528                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8529                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8530                         struct amdgpu_dm_connector *aconn =
8531                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8532
8533                         if (aconn->psr_skip_count > 0)
8534                                 aconn->psr_skip_count--;
8535
8536                         /* Allow PSR when skip count is 0. */
8537                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8538
8539                         /*
8540                          * If sink supports PSR SU, there is no need to rely on
8541                          * a vblank event disable request to enable PSR. PSR SU
8542                          * can be enabled immediately once the OS demonstrates an
8543                          * adequate number of fast atomic commits to notify KMD
8544                          * of update events. See `vblank_control_worker()`.
8545                          */
8546                         if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8547                             acrtc_attach->dm_irq_params.allow_psr_entry &&
8548 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8549                             !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8550 #endif
8551                             !acrtc_state->stream->link->psr_settings.psr_allow_active &&
8552                             (timestamp_ns -
8553                             acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
8554                             500000000)
8555                                 amdgpu_dm_psr_enable(acrtc_state->stream);
8556                 } else {
8557                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
8558                 }
8559
8560                 mutex_unlock(&dm->dc_lock);
8561         }
8562
8563         /*
8564          * Update cursor state *after* programming all the planes.
8565          * This avoids redundant programming in the case where we're going
8566          * to be disabling a single plane - those pipes are being disabled.
8567          */
8568         if (acrtc_state->active_planes)
8569                 amdgpu_dm_commit_cursors(state);
8570
8571 cleanup:
8572         kfree(bundle);
8573 }
8574
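/*
 * Walk the connectors in the atomic state and notify the audio component
 * of removed audio streams before announcing the newly added ones.
 */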
8575 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8576                                    struct drm_atomic_state *state)
8577 {
8578         struct amdgpu_device *adev = drm_to_adev(dev);
8579         struct amdgpu_dm_connector *aconnector;
8580         struct drm_connector *connector;
8581         struct drm_connector_state *old_con_state, *new_con_state;
8582         struct drm_crtc_state *new_crtc_state;
8583         struct dm_crtc_state *new_dm_crtc_state;
8584         const struct dc_stream_status *status;
8585         int i, inst;
8586
8587         /* Notify audio device removals. */
8588         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8589                 if (old_con_state->crtc != new_con_state->crtc) {
8590                         /* CRTC changes require notification. */
8591                         goto notify;
8592                 }
8593
8594                 if (!new_con_state->crtc)
8595                         continue;
8596
8597                 new_crtc_state = drm_atomic_get_new_crtc_state(
8598                         state, new_con_state->crtc);
8599
8600                 if (!new_crtc_state)
8601                         continue;
8602
8603                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8604                         continue;
8605
8606                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
8607                         continue;
8608
8609 notify:
8610                 aconnector = to_amdgpu_dm_connector(connector);
8611
8612                 mutex_lock(&adev->dm.audio_lock);
8613                 inst = aconnector->audio_inst;
8614                 aconnector->audio_inst = -1;
8615                 mutex_unlock(&adev->dm.audio_lock);
8616
8617                 amdgpu_dm_audio_eld_notify(adev, inst);
8618         }
8619
8620         /* Notify audio device additions. */
8621         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8622                 if (!new_con_state->crtc)
8623                         continue;
8624
8625                 new_crtc_state = drm_atomic_get_new_crtc_state(
8626                         state, new_con_state->crtc);
8627
8628                 if (!new_crtc_state)
8629                         continue;
8630
8631                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8632                         continue;
8633
8634                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8635                 if (!new_dm_crtc_state->stream)
8636                         continue;
8637
8638                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8639                 if (!status)
8640                         continue;
8641
8642                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
8643                         continue;
8644
8645                 aconnector = to_amdgpu_dm_connector(connector);
8646
8647                 mutex_lock(&adev->dm.audio_lock);
8648                 inst = status->audio_inst;
8649                 aconnector->audio_inst = inst;
8650                 mutex_unlock(&adev->dm.audio_lock);
8651
8652                 amdgpu_dm_audio_eld_notify(adev, inst);
8653         }
8654 }
8655
8656 /**
8657  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8658  * @crtc_state: the DRM CRTC state.
8659  * @stream_state: the DC stream state.
8660  *
8661  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8662  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8663  */
8664 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8665                                                 struct dc_stream_state *stream_state)
8666 {
8667         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8668 }
8669
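/* Remove the writeback connection (instance 0) from the CRTC's stream. */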
8670 static void dm_clear_writeback(struct amdgpu_display_manager *dm,
8671                               struct dm_crtc_state *crtc_state)
8672 {
8673         dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
8674 }
8675
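/*
 * Commit the constructed dc_state to DC: quiesce writeback and outgoing
 * CRTCs, program the new streams, then record the OTG instance assigned
 * to each active CRTC.
 */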
8676 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
8677                                         struct dc_state *dc_state)
8678 {
8679         struct drm_device *dev = state->dev;
8680         struct amdgpu_device *adev = drm_to_adev(dev);
8681         struct amdgpu_display_manager *dm = &adev->dm;
8682         struct drm_crtc *crtc;
8683         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8684         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8685         struct drm_connector_state *old_con_state;
8686         struct drm_connector *connector;
8687         bool mode_set_reset_required = false;
8688         u32 i;
8689
8690         /* Disable writeback */
8691         for_each_old_connector_in_state(state, connector, old_con_state, i) {
8692                 struct dm_connector_state *dm_old_con_state;
8693                 struct amdgpu_crtc *acrtc;
8694
8695                 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
8696                         continue;
8697
8698                 old_crtc_state = NULL;
8699
8700                 dm_old_con_state = to_dm_connector_state(old_con_state);
8701                 if (!dm_old_con_state->base.crtc)
8702                         continue;
8703
8704                 acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
8705                 if (acrtc)
8706                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8707
8708                 if (!acrtc->wb_enabled)
8709                         continue;
8710
8711                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8712
8713                 dm_clear_writeback(dm, dm_old_crtc_state);
8714                 acrtc->wb_enabled = false;
8715         }
8716
8717         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8718                                       new_crtc_state, i) {
8719                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8720
8721                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8722
8723                 if (old_crtc_state->active &&
8724                     (!new_crtc_state->active ||
8725                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8726                         manage_dm_interrupts(adev, acrtc, false);
8727                         dc_stream_release(dm_old_crtc_state->stream);
8728                 }
8729         }
8730
8731         drm_atomic_helper_calc_timestamping_constants(state);
8732
8733         /* update changed items */
8734         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8735                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8736
8737                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8738                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8739
8740                 drm_dbg_state(state->dev,
8741                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
8742                         acrtc->crtc_id,
8743                         new_crtc_state->enable,
8744                         new_crtc_state->active,
8745                         new_crtc_state->planes_changed,
8746                         new_crtc_state->mode_changed,
8747                         new_crtc_state->active_changed,
8748                         new_crtc_state->connectors_changed);
8749
8750                 /* Disable cursor if disabling crtc */
8751                 if (old_crtc_state->active && !new_crtc_state->active) {
8752                         struct dc_cursor_position position;
8753
8754                         memset(&position, 0, sizeof(position));
8755                         mutex_lock(&dm->dc_lock);
8756                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8757                         mutex_unlock(&dm->dc_lock);
8758                 }
8759
8760                 /* Copy all transient state flags into dc state */
8761                 if (dm_new_crtc_state->stream) {
8762                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8763                                                             dm_new_crtc_state->stream);
8764                 }
8765
8766                 /* Handles the headless hotplug case, updating new_state and
8767                  * aconnector as needed.
8768                  */
8769
8770                 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8771
8772                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8773
8774                         if (!dm_new_crtc_state->stream) {
8775                                 /*
8776                                  * This could happen because of issues with
8777                                  * userspace notification delivery.
8778                                  * In this case userspace tries to set a mode
8779                                  * on a display which is in fact disconnected.
8780                                  * dc_sink is NULL on the aconnector in this case.
8781                                  * We expect a mode reset to come soon.
8782                                  *
8783                                  * This can also happen when an unplug occurs
8784                                  * while the resume sequence is still running.
8785                                  *
8786                                  * In either case, we want to pretend we still
8787                                  * have a sink to keep the pipe running so that
8788                                  * hw state is consistent with the sw state.
8789                                  */
8790                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8791                                                 __func__, acrtc->base.base.id);
8792                                 continue;
8793                         }
8794
8795                         if (dm_old_crtc_state->stream)
8796                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8797
8798                         pm_runtime_get_noresume(dev->dev);
8799
8800                         acrtc->enabled = true;
8801                         acrtc->hw_mode = new_crtc_state->mode;
8802                         crtc->hwmode = new_crtc_state->mode;
8803                         mode_set_reset_required = true;
8804                 } else if (modereset_required(new_crtc_state)) {
8805                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8806                         /* i.e. reset mode */
8807                         if (dm_old_crtc_state->stream)
8808                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8809
8810                         mode_set_reset_required = true;
8811                 }
8812         } /* for_each_crtc_in_state() */
8813
8814         /* If there was a mode set or reset, disable eDP PSR. */
8815         if (mode_set_reset_required) {
8816                 if (dm->vblank_control_workqueue)
8817                         flush_workqueue(dm->vblank_control_workqueue);
8818
8819                 amdgpu_dm_psr_disable_all(dm);
8820         }
8821
8822         dm_enable_per_frame_crtc_master_sync(dc_state);
8823         mutex_lock(&dm->dc_lock);
8824         WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
8825
8826         /* Allow idle optimization when vblank count is 0 for display off */
8827         if (dm->active_vblank_irq_count == 0)
8828                 dc_allow_idle_optimizations(dm->dc, true);
8829         mutex_unlock(&dm->dc_lock);
8830
8831         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8832                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8833
8834                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8835
8836                 if (dm_new_crtc_state->stream != NULL) {
8837                         const struct dc_stream_status *status =
8838                                         dc_stream_get_status(dm_new_crtc_state->stream);
8839
8840                         if (!status)
8841                                 status = dc_state_get_stream_status(dc_state,
8842                                                                          dm_new_crtc_state->stream);
8843                         if (!status)
8844                                 drm_err(dev,
8845                                         "got no status for stream %p on acrtc%p\n",
8846                                         dm_new_crtc_state->stream, acrtc);
8847                         else
8848                                 acrtc->otg_inst = status->primary_otg_inst;
8849                 }
8850         }
8851 }
8852
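/*
 * Build a dc_writeback_info for the connector's queued writeback job,
 * attach it to the CRTC's stream and queue the job so it is signalled
 * on completion.
 */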
8853 static void dm_set_writeback(struct amdgpu_display_manager *dm,
8854                               struct dm_crtc_state *crtc_state,
8855                               struct drm_connector *connector,
8856                               struct drm_connector_state *new_con_state)
8857 {
8858         struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
8859         struct amdgpu_device *adev = dm->adev;
8860         struct amdgpu_crtc *acrtc;
8861         struct dc_writeback_info *wb_info;
8862         struct pipe_ctx *pipe = NULL;
8863         struct amdgpu_framebuffer *afb;
8864         int i = 0;
8865
8866         wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
8867         if (!wb_info) {
8868                 DRM_ERROR("Failed to allocate wb_info\n");
8869                 return;
8870         }
8871
8872         acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
8873         if (!acrtc) {
8874                 DRM_ERROR("no amdgpu_crtc found\n");
8875                 kfree(wb_info);
8876                 return;
8877         }
8878
8879         afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
8880         if (!afb) {
8881                 DRM_ERROR("No amdgpu_framebuffer found\n");
8882                 kfree(wb_info);
8883                 return;
8884         }
8885
8886         for (i = 0; i < MAX_PIPES; i++) {
8887                 if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
8888                         pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
8889                         break;
8890                 }
8891         }
8892
8893         /* fill in wb_info */
8894         wb_info->wb_enabled = true;
8895
8896         wb_info->dwb_pipe_inst = 0;
8897         wb_info->dwb_params.dwbscl_black_color = 0;
8898         wb_info->dwb_params.hdr_mult = 0x1F000;
8899         wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
8900         wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
8901         wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
8902         wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
8903
8904         /* width & height from crtc */
8905         wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
8906         wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
8907         wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
8908         wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
8909
8910         wb_info->dwb_params.cnv_params.crop_en = false;
8911         wb_info->dwb_params.stereo_params.stereo_enabled = false;
8912
8913         wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
8914         wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
8915         wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
8916         wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
8917
8918         wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
8919
8920         wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
8921
8922         wb_info->dwb_params.scaler_taps.h_taps = 4;
8923         wb_info->dwb_params.scaler_taps.v_taps = 4;
8924         wb_info->dwb_params.scaler_taps.h_taps_c = 2;
8925         wb_info->dwb_params.scaler_taps.v_taps_c = 2;
8926         wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
8927
8928         wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
8929         wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
8930
8931         for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
8932                 wb_info->mcif_buf_params.luma_address[i] = afb->address;
8933                 wb_info->mcif_buf_params.chroma_address[i] = 0;
8934         }
8935
8936         wb_info->mcif_buf_params.p_vmid = 1;
8937         if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
8938                 wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
8939                 wb_info->mcif_warmup_params.region_size =
8940                         wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
8941         }
8942         wb_info->mcif_warmup_params.p_vmid = 1;
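        /*
         * Note: this assumes the pipe scan above found a pipe bound to
         * this stream; pipe stays NULL otherwise.
         */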
8943         wb_info->writeback_source_plane = pipe->plane_state;
8944
8945         dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
8946
8947         acrtc->wb_pending = true;
8948         acrtc->wb_conn = wb_conn;
8949         drm_writeback_queue_job(wb_conn, new_con_state);
8950 }
8951
8952 /**
8953  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8954  * @state: The atomic state to commit
8955  *
8956  * This will tell DC to commit the constructed DC state from atomic_check,
8957  * programming the hardware. Any failure here implies a hardware failure, since
8958  * atomic check should have filtered anything non-kosher.
8959  */
8960 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8961 {
8962         struct drm_device *dev = state->dev;
8963         struct amdgpu_device *adev = drm_to_adev(dev);
8964         struct amdgpu_display_manager *dm = &adev->dm;
8965         struct dm_atomic_state *dm_state;
8966         struct dc_state *dc_state = NULL;
8967         u32 i, j;
8968         struct drm_crtc *crtc;
8969         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8970         unsigned long flags;
8971         bool wait_for_vblank = true;
8972         struct drm_connector *connector;
8973         struct drm_connector_state *old_con_state, *new_con_state;
8974         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8975         int crtc_disable_count = 0;
8976
8977         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8978
8979         if (dm->dc->caps.ips_support) {
8980                 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8981                         if (new_con_state->crtc &&
8982                                 new_con_state->crtc->state->active &&
8983                                 drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
8984                                 dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
8985                                 break;
8986                         }
8987                 }
8988         }
8989
8990         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8991         drm_dp_mst_atomic_wait_for_dependencies(state);
8992
8993         dm_state = dm_atomic_get_new_state(state);
8994         if (dm_state && dm_state->context) {
8995                 dc_state = dm_state->context;
8996                 amdgpu_dm_commit_streams(state, dc_state);
8997         }
8998
8999         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9000                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9001                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9002                 struct amdgpu_dm_connector *aconnector;
9003
9004                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9005                         continue;
9006
9007                 aconnector = to_amdgpu_dm_connector(connector);
9008
9009                 if (!adev->dm.hdcp_workqueue)
9010                         continue;
9011
9012                 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
9013
9014                 if (!connector)
9015                         continue;
9016
9017                 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
9018                         connector->index, connector->status, connector->dpms);
9019                 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
9020                         old_con_state->content_protection, new_con_state->content_protection);
9021
9022                 if (aconnector->dc_sink) {
9023                         if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
9024                                 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
9025                                 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
9026                                 aconnector->dc_sink->edid_caps.display_name);
9027                         }
9028                 }
9029
9030                 new_crtc_state = NULL;
9031                 old_crtc_state = NULL;
9032
9033                 if (acrtc) {
9034                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9035                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9036                 }
9037
9038                 if (old_crtc_state)
9039                         pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9040                         old_crtc_state->enable,
9041                         old_crtc_state->active,
9042                         old_crtc_state->mode_changed,
9043                         old_crtc_state->active_changed,
9044                         old_crtc_state->connectors_changed);
9045
9046                 if (new_crtc_state)
9047                         pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
9048                         new_crtc_state->enable,
9049                         new_crtc_state->active,
9050                         new_crtc_state->mode_changed,
9051                         new_crtc_state->active_changed,
9052                         new_crtc_state->connectors_changed);
9053         }
9054
9055         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9056                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9057                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9058                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9059
9060                 if (!adev->dm.hdcp_workqueue)
9061                         continue;
9062
9063                 new_crtc_state = NULL;
9064                 old_crtc_state = NULL;
9065
9066                 if (acrtc) {
9067                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9068                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9069                 }
9070
9071                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9072
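                     /*
                      * If the new state has no stream while userspace still asks
                      * for ENABLED content protection, reset HDCP on the link and
                      * fall back to DESIRED so encryption can be renegotiated
                      * when the display is re-enabled.
                      */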
9073                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9074                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9075                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9076                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9077                         dm_new_con_state->update_hdcp = true;
9078                         continue;
9079                 }
9080
9081                 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
9082                                                     old_con_state, connector, adev->dm.hdcp_workqueue)) {
9083                         /* When a display is unplugged from an MST hub, the connector
9084                          * is destroyed within dm_dp_mst_connector_destroy and its
9085                          * HDCP properties (type, undesired, desired, enabled) are
9086                          * lost. So, save the HDCP properties into hdcp_work within
9087                          * amdgpu_dm_atomic_commit_tail. If the same display is
9088                          * plugged back with the same display index, its HDCP properties
9089                          * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
9090                          */
9091
9092                         bool enable_encryption = false;
9093
9094                         if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
9095                                 enable_encryption = true;
9096
9097                         if (aconnector->dc_link && aconnector->dc_sink &&
9098                                 aconnector->dc_link->type == dc_connection_mst_branch) {
9099                                 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
9100                                 struct hdcp_workqueue *hdcp_w =
9101                                         &hdcp_work[aconnector->dc_link->link_index];
9102
9103                                 hdcp_w->hdcp_content_type[connector->index] =
9104                                         new_con_state->hdcp_content_type;
9105                                 hdcp_w->content_protection[connector->index] =
9106                                         new_con_state->content_protection;
9107                         }
9108
9109                         if (new_crtc_state && new_crtc_state->mode_changed &&
9110                                 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
9111                                 enable_encryption = true;
9112
9113                         DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
9114
9115                         hdcp_update_display(
9116                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9117                                 new_con_state->hdcp_content_type, enable_encryption);
9118                 }
9119         }
9120
9121         /* Handle connector state changes */
9122         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9123                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9124                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9125                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9126                 struct dc_surface_update *dummy_updates;
9127                 struct dc_stream_update stream_update;
9128                 struct dc_info_packet hdr_packet;
9129                 struct dc_stream_status *status = NULL;
9130                 bool abm_changed, hdr_changed, scaling_changed;
9131
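                     /*
                      * stream_update starts zeroed; only the fields for the deltas
                      * detected below (scaling, ABM, HDR metadata) are filled in,
                      * so DC applies just those updates.
                      */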
9132                 memset(&stream_update, 0, sizeof(stream_update));
9133
9134                 if (acrtc) {
9135                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9136                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9137                 }
9138
9139                 /* Skip any modesets/resets */
9140                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9141                         continue;
9142
9143                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9144                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9145
9146                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9147                                                              dm_old_con_state);
9148
9149                 abm_changed = dm_new_crtc_state->abm_level !=
9150                               dm_old_crtc_state->abm_level;
9151
9152                 hdr_changed =
9153                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9154
9155                 if (!scaling_changed && !abm_changed && !hdr_changed)
9156                         continue;
9157
9158                 stream_update.stream = dm_new_crtc_state->stream;
9159                 if (scaling_changed) {
9160                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9161                                         dm_new_con_state, dm_new_crtc_state->stream);
9162
9163                         stream_update.src = dm_new_crtc_state->stream->src;
9164                         stream_update.dst = dm_new_crtc_state->stream->dst;
9165                 }
9166
9167                 if (abm_changed) {
9168                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9169
9170                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9171                 }
9172
9173                 if (hdr_changed) {
9174                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9175                         stream_update.hdr_static_metadata = &hdr_packet;
9176                 }
9177
9178                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9179
9180                 if (WARN_ON(!status))
9181                         continue;
9182
9183                 WARN_ON(!status->plane_count);
9184
9185                 /*
9186                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9187                  * Here we create an empty update on each plane.
9188                  * To fix this, DC should permit updating only stream properties.
9189                  */
9190                 dummy_updates = kcalloc(MAX_SURFACES, sizeof(struct dc_surface_update), GFP_ATOMIC);
                     if (!dummy_updates) {
                             DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
                             continue;
                     }
9191                 for (j = 0; j < status->plane_count; j++)
9192                         dummy_updates[j].surface = status->plane_states[0];
9193
9195                 mutex_lock(&dm->dc_lock);
9196                 dc_update_planes_and_stream(dm->dc,
9197                                             dummy_updates,
9198                                             status->plane_count,
9199                                             dm_new_crtc_state->stream,
9200                                             &stream_update);
9201                 mutex_unlock(&dm->dc_lock);
9202                 kfree(dummy_updates);
9203         }
9204
9205         /*
9206          * Enable interrupts for CRTCs that are newly enabled or went through
9207          * a modeset. This is intentionally deferred until after the front end
9208          * state has been modified, so that the OTG is on and the IRQ handlers
9209          * don't access stale or invalid state.
9210          */
9211         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9212                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9213 #ifdef CONFIG_DEBUG_FS
9214                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9215 #endif
9216                 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9217                 if (old_crtc_state->active && !new_crtc_state->active)
9218                         crtc_disable_count++;
9219
9220                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9221                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9222
9223                 /* For freesync config update on crtc state and params for irq */
9224                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9225
9226 #ifdef CONFIG_DEBUG_FS
9227                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9228                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9229                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9230 #endif
9231
9232                 if (new_crtc_state->active &&
9233                     (!old_crtc_state->active ||
9234                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9235                         dc_stream_retain(dm_new_crtc_state->stream);
9236                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9237                         manage_dm_interrupts(adev, acrtc, true);
9238                 }
9239                 /* Handle vrr on->off / off->on transitions */
9240                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
9241
9242 #ifdef CONFIG_DEBUG_FS
9243                 if (new_crtc_state->active &&
9244                     (!old_crtc_state->active ||
9245                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9246                         /*
9247                          * Frontend may have changed, so reapply the CRC capture
9248                          * settings for the stream.
9249                          */
9250                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9251 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9252                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9253                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9254                                         acrtc->dm_irq_params.window_param.update_win = true;
9255
9256                                         /*
9257                                          * It takes 2 frames for HW to stably generate CRC when
9258                                          * resuming from suspend, so set skip_frame_cnt to 2.
9259                                          */
9260                                         acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
9261                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9262                                 }
9263 #endif
9264                                 if (amdgpu_dm_crtc_configure_crc_source(
9265                                         crtc, dm_new_crtc_state, cur_crc_src))
9266                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9267                         }
9268                 }
9269 #endif
9270         }
9271
9272         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9273                 if (new_crtc_state->async_flip)
9274                         wait_for_vblank = false;
9275
9276         /* Update planes when needed, per CRTC */
9277         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9278                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9279
9280                 if (dm_new_crtc_state->stream)
9281                         amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
9282         }
9283
9284         /* Enable writeback */
9285         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9286                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9287                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9288
9289                 if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
9290                         continue;
9291
9292                 if (!new_con_state->writeback_job)
9293                         continue;
9294
9295                 /* Guard against a NULL acrtc; skip if writeback is already enabled. */
9296                 if (!acrtc || acrtc->wb_enabled)
9297                         continue;
9298
9299                 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9302
9303                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9304
9305                 dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
9306                 acrtc->wb_enabled = true;
9307         }
9308
9309         /* Update audio instances for each connector. */
9310         amdgpu_dm_commit_audio(dev, state);
9311
9312         /* restore the backlight level */
9313         for (i = 0; i < dm->num_of_edps; i++) {
9314                 if (dm->backlight_dev[i] &&
9315                     (dm->actual_brightness[i] != dm->brightness[i]))
9316                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9317         }
9318
9319         /*
9320          * Send a vblank event for every event not handled in flip, and
9321          * mark the event consumed for drm_atomic_helper_commit_hw_done.
9322          */
9323         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9324         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9325
9326                 if (new_crtc_state->event)
9327                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9328
9329                 new_crtc_state->event = NULL;
9330         }
9331         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9332
9333         /* Signal HW programming completion */
9334         drm_atomic_helper_commit_hw_done(state);
9335
9336         if (wait_for_vblank)
9337                 drm_atomic_helper_wait_for_flip_done(dev, state);
9338
9339         drm_atomic_helper_cleanup_planes(dev, state);
9340
9341         /* Don't free the memory if we are hitting this as part of suspend.
9342          * This way we don't free any memory during suspend; see
9343          * amdgpu_bo_free_kernel().  The memory will be freed in the first
9344          * non-suspend modeset or when the driver is torn down.
9345          */
9346         if (!adev->in_suspend) {
9347                 /* return the stolen vga memory back to VRAM */
9348                 if (!adev->mman.keep_stolen_vga_memory)
9349                         amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9350                 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9351         }
9352
9353         /*
9354          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9355          * so we can put the GPU into runtime suspend if we're not driving any
9356          * displays anymore
9357          */
9358         for (i = 0; i < crtc_disable_count; i++)
9359                 pm_runtime_put_autosuspend(dev->dev);
9360         pm_runtime_mark_last_busy(dev->dev);
9361 }
9362
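     /*
      * Build and commit a minimal atomic state (the connector, its current
      * CRTC and the primary plane) to force a modeset that restores the
      * previous display configuration.
      */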
9363 static int dm_force_atomic_commit(struct drm_connector *connector)
9364 {
9365         int ret = 0;
9366         struct drm_device *ddev = connector->dev;
9367         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9368         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9369         struct drm_plane *plane = disconnected_acrtc->base.primary;
9370         struct drm_connector_state *conn_state;
9371         struct drm_crtc_state *crtc_state;
9372         struct drm_plane_state *plane_state;
9373
9374         if (!state)
9375                 return -ENOMEM;
9376
9377         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9378
9379         /* Construct an atomic state to restore previous display setting */
9380
9381         /*
9382          * Attach connectors to drm_atomic_state
9383          */
9384         conn_state = drm_atomic_get_connector_state(state, connector);
9385
9386         ret = PTR_ERR_OR_ZERO(conn_state);
9387         if (ret)
9388                 goto out;
9389
9390         /* Attach crtc to drm_atomic_state */
9391         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9392
9393         ret = PTR_ERR_OR_ZERO(crtc_state);
9394         if (ret)
9395                 goto out;
9396
9397         /* force a restore */
9398         crtc_state->mode_changed = true;
9399
9400         /* Attach plane to drm_atomic_state */
9401         plane_state = drm_atomic_get_plane_state(state, plane);
9402
9403         ret = PTR_ERR_OR_ZERO(plane_state);
9404         if (ret)
9405                 goto out;
9406
9407         /* Call commit internally with the state we just constructed */
9408         ret = drm_atomic_commit(state);
9409
9410 out:
9411         drm_atomic_state_put(state);
9412         if (ret)
9413                 DRM_ERROR("Restoring old state failed with %i\n", ret);
9414
9415         return ret;
9416 }
9417
9418 /*
9419  * This function handles all cases when a set mode does not come upon hotplug.
9420  * This includes when a display is unplugged and then plugged back into the
9421  * same port, and when running without usermode desktop manager support.
9422  */
9423 void dm_restore_drm_connector_state(struct drm_device *dev,
9424                                     struct drm_connector *connector)
9425 {
9426         struct amdgpu_dm_connector *aconnector;
9427         struct amdgpu_crtc *disconnected_acrtc;
9428         struct dm_crtc_state *acrtc_state;
9429
9430         if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9431                 return;
9432
9433         aconnector = to_amdgpu_dm_connector(connector);
9434
9435         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9436                 return;
9437
9438         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9439         if (!disconnected_acrtc)
9440                 return;
9441
9442         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9443         if (!acrtc_state->stream)
9444                 return;
9445
9446         /*
9447          * If the previous sink is not released and differs from the current
9448          * one, we deduce we are in a state where we cannot rely on a usermode
9449          * call to turn on the display, so we do it here.
9450          */
9451         if (acrtc_state->stream->sink != aconnector->dc_sink)
9452                 dm_force_atomic_commit(&aconnector->base);
9453 }
9454
9455 /*
9456  * Grabs all modesetting locks to serialize against any blocking commits and
9457  * waits for completion of all non-blocking commits.
9458  */
9459 static int do_aquire_global_lock(struct drm_device *dev,
9460                                  struct drm_atomic_state *state)
9461 {
9462         struct drm_crtc *crtc;
9463         struct drm_crtc_commit *commit;
9464         long ret;
9465
9466         /*
9467          * Adding all modeset locks to acquire_ctx will ensure that when the
9468          * framework releases it, the extra locks we are locking here will
9469          * get released too.
9470          */
9471         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9472         if (ret)
9473                 return ret;
9474
9475         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
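                     /*
                      * Take a reference on the first pending commit while holding
                      * commit_lock so it cannot be freed while we wait on its
                      * hw_done/flip_done completions below.
                      */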
9476                 spin_lock(&crtc->commit_lock);
9477                 commit = list_first_entry_or_null(&crtc->commit_list,
9478                                 struct drm_crtc_commit, commit_entry);
9479                 if (commit)
9480                         drm_crtc_commit_get(commit);
9481                 spin_unlock(&crtc->commit_lock);
9482
9483                 if (!commit)
9484                         continue;
9485
9486                 /*
9487                  * Make sure all pending HW programming completed and
9488                  * page flips done
9489                  */
9490                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9491
9492                 if (ret > 0)
9493                         ret = wait_for_completion_interruptible_timeout(
9494                                         &commit->flip_done, 10*HZ);
9495
9496                 if (ret == 0)
9497                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9498                                   crtc->base.id, crtc->name);
9499
9500                 drm_crtc_commit_put(commit);
9501         }
9502
9503         return ret < 0 ? ret : 0;
9504 }
9505
9506 static void get_freesync_config_for_crtc(
9507         struct dm_crtc_state *new_crtc_state,
9508         struct dm_connector_state *new_con_state)
9509 {
9510         struct mod_freesync_config config = {0};
9511         struct amdgpu_dm_connector *aconnector;
9512         struct drm_display_mode *mode = &new_crtc_state->base.mode;
9513         int vrefresh = drm_mode_vrefresh(mode);
9514         bool fs_vid_mode = false;
9515
9516         if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
9517                 return;
9518
9519         aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
9520
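             /*
              * VRR is usable only when the sink reports freesync capability and
              * the mode's nominal refresh rate falls inside the sink's supported
              * [min_vfreq, max_vfreq] range.
              */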
9521         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9522                                         vrefresh >= aconnector->min_vfreq &&
9523                                         vrefresh <= aconnector->max_vfreq;
9524
9525         if (new_crtc_state->vrr_supported) {
9526                 new_crtc_state->stream->ignore_msa_timing_param = true;
9527                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9528
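                     /* mod_freesync expects refresh rates in uHz; min/max_vfreq are in Hz. */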
9529                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9530                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9531                 config.vsif_supported = true;
9532                 config.btr = true;
9533
9534                 if (fs_vid_mode) {
9535                         config.state = VRR_STATE_ACTIVE_FIXED;
9536                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9537                         goto out;
9538                 } else if (new_crtc_state->base.vrr_enabled) {
9539                         config.state = VRR_STATE_ACTIVE_VARIABLE;
9540                 } else {
9541                         config.state = VRR_STATE_INACTIVE;
9542                 }
9543         }
9544 out:
9545         new_crtc_state->freesync_config = config;
9546 }
9547
9548 static void reset_freesync_config_for_crtc(
9549         struct dm_crtc_state *new_crtc_state)
9550 {
9551         new_crtc_state->vrr_supported = false;
9552
9553         memset(&new_crtc_state->vrr_infopacket, 0,
9554                sizeof(new_crtc_state->vrr_infopacket));
9555 }
9556
9557 static bool
9558 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9559                                  struct drm_crtc_state *new_crtc_state)
9560 {
9561         const struct drm_display_mode *old_mode, *new_mode;
9562
9563         if (!old_crtc_state || !new_crtc_state)
9564                 return false;
9565
9566         old_mode = &old_crtc_state->mode;
9567         new_mode = &new_crtc_state->mode;
9568
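             /*
              * The timing is considered unchanged for freesync purposes when
              * only the vertical front porch moved: pixel clock, all horizontal
              * timing and the vsync pulse width must match, while vtotal and
              * the vsync position may differ.
              */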
9569         if (old_mode->clock       == new_mode->clock &&
9570             old_mode->hdisplay    == new_mode->hdisplay &&
9571             old_mode->vdisplay    == new_mode->vdisplay &&
9572             old_mode->htotal      == new_mode->htotal &&
9573             old_mode->vtotal      != new_mode->vtotal &&
9574             old_mode->hsync_start == new_mode->hsync_start &&
9575             old_mode->vsync_start != new_mode->vsync_start &&
9576             old_mode->hsync_end   == new_mode->hsync_end &&
9577             old_mode->vsync_end   != new_mode->vsync_end &&
9578             old_mode->hskew       == new_mode->hskew &&
9579             old_mode->vscan       == new_mode->vscan &&
9580             (old_mode->vsync_end - old_mode->vsync_start) ==
9581             (new_mode->vsync_end - new_mode->vsync_start))
9582                 return true;
9583
9584         return false;
9585 }
9586
9587 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9588 {
9589         u64 num, den, res;
9590         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9591
9592         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9593
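             /* fixed refresh (uHz) = clock (kHz) * 1000 * 1000000 / (htotal * vtotal) */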
9594         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9595         den = (unsigned long long)new_crtc_state->mode.htotal *
9596               (unsigned long long)new_crtc_state->mode.vtotal;
9597
9598         res = div_u64(num, den);
9599         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9600 }
9601
9602 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9603                          struct drm_atomic_state *state,
9604                          struct drm_crtc *crtc,
9605                          struct drm_crtc_state *old_crtc_state,
9606                          struct drm_crtc_state *new_crtc_state,
9607                          bool enable,
9608                          bool *lock_and_validation_needed)
9609 {
9610         struct dm_atomic_state *dm_state = NULL;
9611         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9612         struct dc_stream_state *new_stream;
9613         int ret = 0;
9614
9615         /*
9616          * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9617          * dc_validation_set; update changed items.
9618          */
9619         struct amdgpu_crtc *acrtc = NULL;
9620         struct drm_connector *connector = NULL;
9621         struct amdgpu_dm_connector *aconnector = NULL;
9622         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9623         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9624
9625         new_stream = NULL;
9626
9627         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9628         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9629         acrtc = to_amdgpu_crtc(crtc);
9630         connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9631         if (connector)
9632                 aconnector = to_amdgpu_dm_connector(connector);
9633
9634         /* TODO This hack should go away */
9635         if (connector && enable) {
9636                 /* Make sure fake sink is created in plug-in scenario */
9637                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9638                                                                         connector);
9639                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9640                                                                         connector);
9641
9642                 if (IS_ERR(drm_new_conn_state)) {
9643                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9644                         goto fail;
9645                 }
9646
9647                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9648                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9649
9650                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9651                         goto skip_modeset;
9652
9653                 new_stream = create_validate_stream_for_sink(aconnector,
9654                                                              &new_crtc_state->mode,
9655                                                              dm_new_conn_state,
9656                                                              dm_old_crtc_state->stream);
9657
9658                 /*
9659                  * We can have no stream on ACTION_SET if a display
9660                  * was disconnected during S3. In this case it is not an
9661                  * error: the OS will be updated after detection and
9662                  * will do the right thing on the next atomic commit.
9663                  */
9664
9665                 if (!new_stream) {
9666                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9667                                         __func__, acrtc->base.base.id);
9668                         ret = -ENOMEM;
9669                         goto fail;
9670                 }
9671
9672                 /*
9673                  * TODO: Check VSDB bits to decide whether this should
9674                  * be enabled or not.
9675                  */
9676                 new_stream->triggered_crtc_reset.enabled =
9677                         dm->force_timing_sync;
9678
9679                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9680
9681                 ret = fill_hdr_info_packet(drm_new_conn_state,
9682                                            &new_stream->hdr_static_metadata);
9683                 if (ret)
9684                         goto fail;
9685
9686                 /*
9687                  * If we already removed the old stream from the context
9688                  * (and set the new stream to NULL) then we can't reuse
9689                  * the old stream even if the stream and scaling are unchanged.
9690                  * We'll hit the BUG_ON and black screen.
9691                  *
9692                  * TODO: Refactor this function to allow this check to work
9693                  * in all conditions.
9694                  */
9695                 if (dm_new_crtc_state->stream &&
9696                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9697                         goto skip_modeset;
9698
9699                 if (dm_new_crtc_state->stream &&
9700                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9701                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9702                         new_crtc_state->mode_changed = false;
9703                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
9704                                          new_crtc_state->mode_changed);
9705                 }
9706         }
9707
9708         /* mode_changed flag may get updated above, need to check again */
9709         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9710                 goto skip_modeset;
9711
9712         drm_dbg_state(state->dev,
9713                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
9714                 acrtc->crtc_id,
9715                 new_crtc_state->enable,
9716                 new_crtc_state->active,
9717                 new_crtc_state->planes_changed,
9718                 new_crtc_state->mode_changed,
9719                 new_crtc_state->active_changed,
9720                 new_crtc_state->connectors_changed);
9721
9722         /* Remove stream for any changed/disabled CRTC */
9723         if (!enable) {
9724
9725                 if (!dm_old_crtc_state->stream)
9726                         goto skip_modeset;
9727
9728                 /* Unset freesync video if it was active before */
9729                 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
9730                         dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
9731                         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
9732                 }
9733
9734                 /* Now check if we should set freesync video mode */
9735                 if (dm_new_crtc_state->stream &&
9736                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9737                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
9738                     is_timing_unchanged_for_freesync(new_crtc_state,
9739                                                      old_crtc_state)) {
9740                         new_crtc_state->mode_changed = false;
9741                         DRM_DEBUG_DRIVER(
9742                                 "Mode change not required for front porch change, setting mode_changed to %d\n",
9743                                 new_crtc_state->mode_changed);
9744
9745                         set_freesync_fixed_config(dm_new_crtc_state);
9746
9747                         goto skip_modeset;
9748                 } else if (aconnector &&
9749                            is_freesync_video_mode(&new_crtc_state->mode,
9750                                                   aconnector)) {
9751                         struct drm_display_mode *high_mode;
9752
9753                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
9754                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
9755                                 set_freesync_fixed_config(dm_new_crtc_state);
9756                 }
9757
9758                 ret = dm_atomic_get_state(state, &dm_state);
9759                 if (ret)
9760                         goto fail;
9761
9762                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9763                                 crtc->base.id);
9764
9765                 /* i.e. reset mode */
9766                 if (dc_state_remove_stream(
9767                                 dm->dc,
9768                                 dm_state->context,
9769                                 dm_old_crtc_state->stream) != DC_OK) {
9770                         ret = -EINVAL;
9771                         goto fail;
9772                 }
9773
9774                 dc_stream_release(dm_old_crtc_state->stream);
9775                 dm_new_crtc_state->stream = NULL;
9776
9777                 reset_freesync_config_for_crtc(dm_new_crtc_state);
9778
9779                 *lock_and_validation_needed = true;
9780
9781         } else { /* Add stream for any updated/enabled CRTC */
9782                 /*
9783                  * Quick fix to prevent a NULL pointer dereference on new_stream when
9784                  * added MST connectors are not found in the existing crtc_state in
9785                  * chained mode. TODO: need to dig out the root cause of this.
9786                  */
9787                 if (!connector)
9788                         goto skip_modeset;
9789
9790                 if (modereset_required(new_crtc_state))
9791                         goto skip_modeset;
9792
9793                 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
9794                                      dm_old_crtc_state->stream)) {
9795
9796                         WARN_ON(dm_new_crtc_state->stream);
9797
9798                         ret = dm_atomic_get_state(state, &dm_state);
9799                         if (ret)
9800                                 goto fail;
9801
9802                         dm_new_crtc_state->stream = new_stream;
9803
9804                         dc_stream_retain(new_stream);
9805
9806                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9807                                          crtc->base.id);
9808
9809                         if (dc_state_add_stream(
9810                                         dm->dc,
9811                                         dm_state->context,
9812                                         dm_new_crtc_state->stream) != DC_OK) {
9813                                 ret = -EINVAL;
9814                                 goto fail;
9815                         }
9816
9817                         *lock_and_validation_needed = true;
9818                 }
9819         }
9820
9821 skip_modeset:
9822         /* Release extra reference */
9823         if (new_stream)
9824                 dc_stream_release(new_stream);
9825
9826         /*
9827          * We want to do dc stream updates that do not require a
9828          * full modeset below.
9829          */
9830         if (!(enable && connector && new_crtc_state->active))
9831                 return 0;
9832         /*
9833          * Given the above conditions, the dc stream state cannot be NULL:
9834          * 1. We're in the process of enabling the CRTC (its stream was just
9835          *    added to the dc context, or is already in the context),
9836          * 2. It has a valid connector attached, and
9837          * 3. It is currently active and enabled.
9838          * => The dc stream state currently exists.
9839          */
9840         BUG_ON(dm_new_crtc_state->stream == NULL);
9841
9842         /* Scaling or underscan settings */
9843         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9844                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
9845                 update_stream_scaling_settings(
9846                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9847
9848         /* ABM settings */
9849         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9850
9851         /*
9852          * Color management settings. We also update color properties
9853          * when a modeset is needed, to ensure it gets reprogrammed.
9854          */
9855         if (dm_new_crtc_state->base.color_mgmt_changed ||
9856             dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
9857             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9858                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9859                 if (ret)
9860                         goto fail;
9861         }
9862
9863         /* Update Freesync settings. */
9864         get_freesync_config_for_crtc(dm_new_crtc_state,
9865                                      dm_new_conn_state);
9866
9867         return ret;
9868
9869 fail:
9870         if (new_stream)
9871                 dc_stream_release(new_stream);
9872         return ret;
9873 }
9874
9875 static bool should_reset_plane(struct drm_atomic_state *state,
9876                                struct drm_plane *plane,
9877                                struct drm_plane_state *old_plane_state,
9878                                struct drm_plane_state *new_plane_state)
9879 {
9880         struct drm_plane *other;
9881         struct drm_plane_state *old_other_state, *new_other_state;
9882         struct drm_crtc_state *new_crtc_state;
9883         struct amdgpu_device *adev = drm_to_adev(plane->dev);
9884         int i;
9885
9886         /*
9887          * TODO: Remove this hack for all ASICs once it's proven that
9888          * fast updates work fine on DCN 3.2+.
9889          */
9890         if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
9891             state->allow_modeset)
9892                 return true;
9893
9894         /* Exit early if we know that we're adding or removing the plane. */
9895         if (old_plane_state->crtc != new_plane_state->crtc)
9896                 return true;
9897
9898         /* old crtc == new_crtc == NULL, plane not in context. */
9899         if (!new_plane_state->crtc)
9900                 return false;
9901
9902         new_crtc_state =
9903                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9904
9905         if (!new_crtc_state)
9906                 return true;
9907
9908         /* CRTC Degamma changes currently require us to recreate planes. */
9909         if (new_crtc_state->color_mgmt_changed)
9910                 return true;
9911
9912         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9913                 return true;
9914
9915         /*
9916          * If there are any new primary or overlay planes being added or
9917          * removed then the z-order can potentially change. To ensure
9918          * correct z-order and pipe acquisition the current DC architecture
9919          * requires us to remove and recreate all existing planes.
9920          *
9921          * TODO: Come up with a more elegant solution for this.
9922          */
9923         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9924                 struct amdgpu_framebuffer *old_afb, *new_afb;
9925                 struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
9926
9927                 dm_new_other_state = to_dm_plane_state(new_other_state);
9928                 dm_old_other_state = to_dm_plane_state(old_other_state);
9929
9930                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9931                         continue;
9932
9933                 if (old_other_state->crtc != new_plane_state->crtc &&
9934                     new_other_state->crtc != new_plane_state->crtc)
9935                         continue;
9936
9937                 if (old_other_state->crtc != new_other_state->crtc)
9938                         return true;
9939
9940                 /* Src/dst size and scaling updates. */
9941                 if (old_other_state->src_w != new_other_state->src_w ||
9942                     old_other_state->src_h != new_other_state->src_h ||
9943                     old_other_state->crtc_w != new_other_state->crtc_w ||
9944                     old_other_state->crtc_h != new_other_state->crtc_h)
9945                         return true;
9946
9947                 /* Rotation / mirroring updates. */
9948                 if (old_other_state->rotation != new_other_state->rotation)
9949                         return true;
9950
9951                 /* Blending updates. */
9952                 if (old_other_state->pixel_blend_mode !=
9953                     new_other_state->pixel_blend_mode)
9954                         return true;
9955
9956                 /* Alpha updates. */
9957                 if (old_other_state->alpha != new_other_state->alpha)
9958                         return true;
9959
9960                 /* Colorspace changes. */
9961                 if (old_other_state->color_range != new_other_state->color_range ||
9962                     old_other_state->color_encoding != new_other_state->color_encoding)
9963                         return true;
9964
9965                 /* HDR/Transfer Function changes. */
9966                 if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
9967                     dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
9968                     dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
9969                     dm_old_other_state->ctm != dm_new_other_state->ctm ||
9970                     dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
9971                     dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
9972                     dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
9973                     dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
9974                     dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
9975                         return true;
9976
9977                 /* Framebuffer checks fall at the end. */
9978                 if (!old_other_state->fb || !new_other_state->fb)
9979                         continue;
9980
9981                 /* Pixel format changes can require bandwidth updates. */
9982                 if (old_other_state->fb->format != new_other_state->fb->format)
9983                         return true;
9984
9985                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9986                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9987
9988                 /* Tiling and DCC changes also require bandwidth updates. */
9989                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9990                     old_afb->base.modifier != new_afb->base.modifier)
9991                         return true;
9992         }
9993
9994         return false;
9995 }
9996
9997 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9998                               struct drm_plane_state *new_plane_state,
9999                               struct drm_framebuffer *fb)
10000 {
10001         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10002         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10003         unsigned int pitch;
10004         bool linear;
10005
10006         if (fb->width > new_acrtc->max_cursor_width ||
10007             fb->height > new_acrtc->max_cursor_height) {
10008                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10009                                  fb->width, fb->height);
10011                 return -EINVAL;
10012         }
10013         if (new_plane_state->src_w != fb->width << 16 ||
10014             new_plane_state->src_h != fb->height << 16) {
10015                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10016                 return -EINVAL;
10017         }
10018
10019         /* Pitch in pixels */
10020         pitch = fb->pitches[0] / fb->format->cpp[0];
10021
10022         if (fb->width != pitch) {
10023                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10024                                  fb->width, pitch);
10025                 return -EINVAL;
10026         }
10027
10028         switch (pitch) {
10029         case 64:
10030         case 128:
10031         case 256:
10032                 /* FB pitch is supported by cursor plane */
10033                 break;
10034         default:
10035                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10036                 return -EINVAL;
10037         }
10038
10039         /* Core DRM takes care of checking FB modifiers, so we only need to
10040          * check tiling flags when the FB doesn't have a modifier.
10041          */
10042         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10043                 if (adev->family < AMDGPU_FAMILY_AI) {
10044                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10045                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10046                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10047                 } else {
10048                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10049                 }
10050                 if (!linear) {
10051                         DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10052                         return -EINVAL;
10053                 }
10054         }
10055
10056         return 0;
10057 }
10058
10059 static int dm_update_plane_state(struct dc *dc,
10060                                  struct drm_atomic_state *state,
10061                                  struct drm_plane *plane,
10062                                  struct drm_plane_state *old_plane_state,
10063                                  struct drm_plane_state *new_plane_state,
10064                                  bool enable,
10065                                  bool *lock_and_validation_needed,
10066                                  bool *is_top_most_overlay)
10067 {
10069         struct dm_atomic_state *dm_state = NULL;
10070         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10071         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10072         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10073         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10074         struct amdgpu_crtc *new_acrtc;
10075         bool needs_reset;
10076         int ret = 0;
10077
10079         new_plane_crtc = new_plane_state->crtc;
10080         old_plane_crtc = old_plane_state->crtc;
10081         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10082         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10083
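              /*
               * Cursor planes are not added to the DC context; validate the
               * position and FB constraints here and return early.
               */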
10084         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10085                 if (!enable || !new_plane_crtc ||
10086                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10087                         return 0;
10088
10089                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10090
10091                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10092                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10093                         return -EINVAL;
10094                 }
10095
10096                 if (new_plane_state->fb) {
10097                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10098                                                  new_plane_state->fb);
10099                         if (ret)
10100                                 return ret;
10101                 }
10102
10103                 return 0;
10104         }
10105
10106         needs_reset = should_reset_plane(state, plane, old_plane_state,
10107                                          new_plane_state);
10108
10109         /* Remove any changed/removed planes */
10110         if (!enable) {
10111                 if (!needs_reset)
10112                         return 0;
10113
10114                 if (!old_plane_crtc)
10115                         return 0;
10116
10117                 old_crtc_state = drm_atomic_get_old_crtc_state(
10118                                 state, old_plane_crtc);
10119                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10120
10121                 if (!dm_old_crtc_state->stream)
10122                         return 0;
10123
10124                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10125                                 plane->base.id, old_plane_crtc->base.id);
10126
10127                 ret = dm_atomic_get_state(state, &dm_state);
10128                 if (ret)
10129                         return ret;
10130
10131                 if (!dc_state_remove_plane(
10132                                 dc,
10133                                 dm_old_crtc_state->stream,
10134                                 dm_old_plane_state->dc_state,
10135                                 dm_state->context)) {
10136
10137                         return -EINVAL;
10138                 }
10139
10140                 if (dm_old_plane_state->dc_state)
10141                         dc_plane_state_release(dm_old_plane_state->dc_state);
10142
10143                 dm_new_plane_state->dc_state = NULL;
10144
10145                 *lock_and_validation_needed = true;
10146
10147         } else { /* Add new planes */
10148                 struct dc_plane_state *dc_new_plane_state;
10149
10150                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10151                         return 0;
10152
10153                 if (!new_plane_crtc)
10154                         return 0;
10155
10156                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10157                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10158
10159                 if (!dm_new_crtc_state->stream)
10160                         return 0;
10161
10162                 if (!needs_reset)
10163                         return 0;
10164
10165                 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10166                 if (ret)
10167                         return ret;
10168
10169                 WARN_ON(dm_new_plane_state->dc_state);
10170
10171                 dc_new_plane_state = dc_create_plane_state(dc);
10172                 if (!dc_new_plane_state)
10173                         return -ENOMEM;
10174
10175                 /* Block the top-most plane from being a video plane */
10176                 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10177                         if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) &&
                                  *is_top_most_overlay) {
                                      /* Release the plane state created above to avoid leaking it. */
                                      dc_plane_state_release(dc_new_plane_state);
                                      return -EINVAL;
                              }
10179
10180                         *is_top_most_overlay = false;
10181                 }
10182
10183                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10184                                  plane->base.id, new_plane_crtc->base.id);
10185
10186                 ret = fill_dc_plane_attributes(
10187                         drm_to_adev(new_plane_crtc->dev),
10188                         dc_new_plane_state,
10189                         new_plane_state,
10190                         new_crtc_state);
10191                 if (ret) {
10192                         dc_plane_state_release(dc_new_plane_state);
10193                         return ret;
10194                 }
10195
10196                 ret = dm_atomic_get_state(state, &dm_state);
10197                 if (ret) {
10198                         dc_plane_state_release(dc_new_plane_state);
10199                         return ret;
10200                 }
10201
10202                 /*
10203                  * Any atomic check errors that occur after this will
10204                  * not need a release. The plane state will be attached
10205                  * to the stream, and therefore part of the atomic
10206                  * state. It'll be released when the atomic state is
10207                  * cleaned.
10208                  */
10209                 if (!dc_state_add_plane(
10210                                 dc,
10211                                 dm_new_crtc_state->stream,
10212                                 dc_new_plane_state,
10213                                 dm_state->context)) {
10214
10215                         dc_plane_state_release(dc_new_plane_state);
10216                         return -EINVAL;
10217                 }
10218
10219                 dm_new_plane_state->dc_state = dc_new_plane_state;
10220
10221                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10222
10223                 /* Tell DC to do a full surface update every time there
10224                  * is a plane change. Inefficient, but works for now.
10225                  */
10226                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10227
10228                 *lock_and_validation_needed = true;
10229         }
10230
10232         return ret;
10233 }
10234
10235 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10236                                        int *src_w, int *src_h)
10237 {
10238         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10239         case DRM_MODE_ROTATE_90:
10240         case DRM_MODE_ROTATE_270:
10241                 *src_w = plane_state->src_h >> 16;
10242                 *src_h = plane_state->src_w >> 16;
10243                 break;
10244         case DRM_MODE_ROTATE_0:
10245         case DRM_MODE_ROTATE_180:
10246         default:
10247                 *src_w = plane_state->src_w >> 16;
10248                 *src_h = plane_state->src_h >> 16;
10249                 break;
10250         }
10251 }
10252
10253 static void
10254 dm_get_plane_scale(struct drm_plane_state *plane_state,
10255                    int *out_plane_scale_w, int *out_plane_scale_h)
10256 {
10257         int plane_src_w, plane_src_h;
10258
10259         dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
10260         *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
10261         *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
10262 }
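
/*
 * Worked example (values assumed for illustration): src_w/src_h are 16.16
 * fixed point, so a 1920x1080 source (src_w == 1920 << 16) scanned out to a
 * 960x540 CRTC rectangle gives, in per-mille units:
 *
 *	scale_w = 960 * 1000 / 1920 = 500	// 0.5x
 *	scale_h = 540 * 1000 / 1080 = 500
 *
 * A 90/270 degree rotation swaps the source dimensions first, so the scale
 * is computed against the orientation actually scanned out.
 */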
10263
10264 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10265                                 struct drm_crtc *crtc,
10266                                 struct drm_crtc_state *new_crtc_state)
10267 {
10268         struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
10269         struct drm_plane_state *old_plane_state, *new_plane_state;
10270         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10271         int i;
10272         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10273         bool any_relevant_change = false;
10274
	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it inherits the scaling and positioning from
	 * the underlying pipe. Check that the cursor plane's scaling matches
	 * the underlying planes'.
	 */
10280
10281         /* If no plane was enabled or changed scaling, no need to check again */
10282         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10283                 int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
10284
10285                 if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
10286                         continue;
10287
10288                 if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
10289                         any_relevant_change = true;
10290                         break;
10291                 }
10292
10293                 if (new_plane_state->fb == old_plane_state->fb &&
10294                     new_plane_state->crtc_w == old_plane_state->crtc_w &&
10295                     new_plane_state->crtc_h == old_plane_state->crtc_h)
10296                         continue;
10297
10298                 dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
10299                 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
10300
10301                 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
10302                         any_relevant_change = true;
10303                         break;
10304                 }
10305         }
10306
10307         if (!any_relevant_change)
10308                 return 0;
10309
10310         new_cursor_state = drm_atomic_get_plane_state(state, cursor);
10311         if (IS_ERR(new_cursor_state))
10312                 return PTR_ERR(new_cursor_state);
10313
10314         if (!new_cursor_state->fb)
10315                 return 0;
10316
10317         dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
10318
10319         /* Need to check all enabled planes, even if this commit doesn't change
10320          * their state
10321          */
10322         i = drm_atomic_add_affected_planes(state, crtc);
10323         if (i)
10324                 return i;
10325
10326         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10327                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10328                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10329                         continue;
10330
10331                 /* Ignore disabled planes */
10332                 if (!new_underlying_state->fb)
10333                         continue;
10334
10335                 dm_get_plane_scale(new_underlying_state,
10336                                    &underlying_scale_w, &underlying_scale_h);
10337
10338                 if (cursor_scale_w != underlying_scale_w ||
10339                     cursor_scale_h != underlying_scale_h) {
10340                         drm_dbg_atomic(crtc->dev,
10341                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10342                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10343                         return -EINVAL;
10344                 }
10345
10346                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10347                 if (new_underlying_state->crtc_x <= 0 &&
10348                     new_underlying_state->crtc_y <= 0 &&
10349                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10350                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10351                         break;
10352         }
10353
10354         return 0;
10355 }
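
/*
 * Illustration of the check above (numbers assumed): a 64x64 cursor drawn
 * 1:1 (scale 1000) over an underlying plane scanned out at 2x (scale 2000)
 * would be rendered at 128x128, since the cursor inherits the pipe scaling;
 * rather than commit a visibly wrong cursor, atomic check returns -EINVAL.
 */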
10356
10357 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10358 {
10359         struct drm_connector *connector;
10360         struct drm_connector_state *conn_state, *old_conn_state;
10361         struct amdgpu_dm_connector *aconnector = NULL;
10362         int i;
10363
10364         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10365                 if (!conn_state->crtc)
10366                         conn_state = old_conn_state;
10367
10368                 if (conn_state->crtc != crtc)
10369                         continue;
10370
10371                 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
10372                         continue;
10373
10374                 aconnector = to_amdgpu_dm_connector(connector);
10375                 if (!aconnector->mst_output_port || !aconnector->mst_root)
10376                         aconnector = NULL;
10377                 else
10378                         break;
10379         }
10380
10381         if (!aconnector)
10382                 return 0;
10383
10384         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
10385 }
10386
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
 *
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state. Otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For full updates, which remove/add/update streams on one CRTC
 * while flipping on another CRTC, acquiring the global lock guarantees that
 * any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
 */
10412 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10413                                   struct drm_atomic_state *state)
10414 {
10415         struct amdgpu_device *adev = drm_to_adev(dev);
10416         struct dm_atomic_state *dm_state = NULL;
10417         struct dc *dc = adev->dm.dc;
10418         struct drm_connector *connector;
10419         struct drm_connector_state *old_con_state, *new_con_state;
10420         struct drm_crtc *crtc;
10421         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10422         struct drm_plane *plane;
10423         struct drm_plane_state *old_plane_state, *new_plane_state;
10424         enum dc_status status;
10425         int ret, i;
10426         bool lock_and_validation_needed = false;
10427         bool is_top_most_overlay = true;
10428         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10429         struct drm_dp_mst_topology_mgr *mgr;
10430         struct drm_dp_mst_topology_state *mst_state;
10431         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10432
10433         trace_amdgpu_dm_atomic_check_begin(state);
10434
10435         ret = drm_atomic_helper_check_modeset(dev, state);
10436         if (ret) {
10437                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10438                 goto fail;
10439         }
10440
10441         /* Check connector changes */
10442         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10443                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10444                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10445
		/* Skip connectors that are disabled or part of a modeset already. */
10447                 if (!new_con_state->crtc)
10448                         continue;
10449
10450                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10451                 if (IS_ERR(new_crtc_state)) {
10452                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10453                         ret = PTR_ERR(new_crtc_state);
10454                         goto fail;
10455                 }
10456
10457                 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
10458                     dm_old_con_state->scaling != dm_new_con_state->scaling)
10459                         new_crtc_state->connectors_changed = true;
10460         }
10461
10462         if (dc_resource_is_dsc_encoding_supported(dc)) {
10463                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10464                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10465                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10466                                 if (ret) {
10467                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10468                                         goto fail;
10469                                 }
10470                         }
10471                 }
10472         }
10473         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10474                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10475
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
10480                         continue;
10481
10482                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10483                 if (ret) {
10484                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10485                         goto fail;
10486                 }
10487
10488                 if (!new_crtc_state->enable)
10489                         continue;
10490
10491                 ret = drm_atomic_add_affected_connectors(state, crtc);
10492                 if (ret) {
10493                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10494                         goto fail;
10495                 }
10496
10497                 ret = drm_atomic_add_affected_planes(state, crtc);
10498                 if (ret) {
10499                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10500                         goto fail;
10501                 }
10502
10503                 if (dm_old_crtc_state->dsc_force_changed)
10504                         new_crtc_state->mode_changed = true;
10505         }
10506
10507         /*
10508          * Add all primary and overlay planes on the CRTC to the state
10509          * whenever a plane is enabled to maintain correct z-ordering
10510          * and to enable fast surface updates.
10511          */
10512         drm_for_each_crtc(crtc, dev) {
10513                 bool modified = false;
10514
10515                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10516                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10517                                 continue;
10518
10519                         if (new_plane_state->crtc == crtc ||
10520                             old_plane_state->crtc == crtc) {
10521                                 modified = true;
10522                                 break;
10523                         }
10524                 }
10525
10526                 if (!modified)
10527                         continue;
10528
10529                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10530                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
10531                                 continue;
10532
10533                         new_plane_state =
10534                                 drm_atomic_get_plane_state(state, plane);
10535
10536                         if (IS_ERR(new_plane_state)) {
10537                                 ret = PTR_ERR(new_plane_state);
				DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
10539                                 goto fail;
10540                         }
10541                 }
10542         }
10543
10544         /*
10545          * DC consults the zpos (layer_index in DC terminology) to determine the
10546          * hw plane on which to enable the hw cursor (see
10547          * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
10548          * atomic state, so call drm helper to normalize zpos.
10549          */
10550         ret = drm_atomic_normalize_zpos(dev, state);
10551         if (ret) {
10552                 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
10553                 goto fail;
10554         }
10555
	/* Remove existing planes if they are modified */
10557         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10558                 if (old_plane_state->fb && new_plane_state->fb &&
10559                     get_mem_type(old_plane_state->fb) !=
10560                     get_mem_type(new_plane_state->fb))
10561                         lock_and_validation_needed = true;
10562
10563                 ret = dm_update_plane_state(dc, state, plane,
10564                                             old_plane_state,
10565                                             new_plane_state,
10566                                             false,
10567                                             &lock_and_validation_needed,
10568                                             &is_top_most_overlay);
10569                 if (ret) {
10570                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10571                         goto fail;
10572                 }
10573         }
10574
10575         /* Disable all crtcs which require disable */
10576         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10577                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10578                                            old_crtc_state,
10579                                            new_crtc_state,
10580                                            false,
10581                                            &lock_and_validation_needed);
10582                 if (ret) {
10583                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10584                         goto fail;
10585                 }
10586         }
10587
10588         /* Enable all crtcs which require enable */
10589         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10590                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10591                                            old_crtc_state,
10592                                            new_crtc_state,
10593                                            true,
10594                                            &lock_and_validation_needed);
10595                 if (ret) {
10596                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10597                         goto fail;
10598                 }
10599         }
10600
10601         /* Add new/modified planes */
10602         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10603                 ret = dm_update_plane_state(dc, state, plane,
10604                                             old_plane_state,
10605                                             new_plane_state,
10606                                             true,
10607                                             &lock_and_validation_needed,
10608                                             &is_top_most_overlay);
10609                 if (ret) {
10610                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10611                         goto fail;
10612                 }
10613         }
10614
10615         if (dc_resource_is_dsc_encoding_supported(dc)) {
10616                 ret = pre_validate_dsc(state, &dm_state, vars);
10617                 if (ret != 0)
10618                         goto fail;
10619         }
10620
10621         /* Run this here since we want to validate the streams we created */
10622         ret = drm_atomic_helper_check_planes(dev, state);
10623         if (ret) {
10624                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
10625                 goto fail;
10626         }
10627
10628         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10629                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10630                 if (dm_new_crtc_state->mpo_requested)
10631                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
10632         }
10633
10634         /* Check cursor planes scaling */
10635         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10636                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10637                 if (ret) {
10638                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
10639                         goto fail;
10640                 }
10641         }
10642
10643         if (state->legacy_cursor_update) {
10644                 /*
10645                  * This is a fast cursor update coming from the plane update
10646                  * helper, check if it can be done asynchronously for better
10647                  * performance.
10648                  */
10649                 state->async_update =
10650                         !drm_atomic_helper_async_check(dev, state);
10651
10652                 /*
10653                  * Skip the remaining global validation if this is an async
10654                  * update. Cursor updates can be done without affecting
10655                  * state or bandwidth calcs and this avoids the performance
10656                  * penalty of locking the private state object and
10657                  * allocating a new dc_state.
10658                  */
10659                 if (state->async_update)
10660                         return 0;
10661         }
10662
	/* Check scaling and underscan changes */
	/* TODO Removed scaling changes validation due to inability to commit
	 * new stream into context w/o causing full reset. Need to
	 * decide how to handle.
	 */
10668         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10669                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10670                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10671                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10672
10673                 /* Skip any modesets/resets */
10674                 if (!acrtc || drm_atomic_crtc_needs_modeset(
10675                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10676                         continue;
10677
		/* Skip anything that is not a scaling or underscan change */
10679                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10680                         continue;
10681
10682                 lock_and_validation_needed = true;
10683         }
10684
10685         /* set the slot info for each mst_state based on the link encoding format */
10686         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10687                 struct amdgpu_dm_connector *aconnector;
10688                 struct drm_connector *connector;
10689                 struct drm_connector_list_iter iter;
10690                 u8 link_coding_cap;
10691
10692                 drm_connector_list_iter_begin(dev, &iter);
10693                 drm_for_each_connector_iter(connector, &iter) {
10694                         if (connector->index == mst_state->mgr->conn_base_id) {
10695                                 aconnector = to_amdgpu_dm_connector(connector);
10696                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10697                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10698
10699                                 break;
10700                         }
10701                 }
10702                 drm_connector_list_iter_end(&iter);
10703         }
10704
	/*
10706          * Streams and planes are reset when there are changes that affect
10707          * bandwidth. Anything that affects bandwidth needs to go through
10708          * DC global validation to ensure that the configuration can be applied
10709          * to hardware.
10710          *
10711          * We have to currently stall out here in atomic_check for outstanding
10712          * commits to finish in this case because our IRQ handlers reference
10713          * DRM state directly - we can end up disabling interrupts too early
10714          * if we don't.
10715          *
10716          * TODO: Remove this stall and drop DM state private objects.
10717          */
10718         if (lock_and_validation_needed) {
10719                 ret = dm_atomic_get_state(state, &dm_state);
10720                 if (ret) {
10721                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
10722                         goto fail;
10723                 }
10724
10725                 ret = do_aquire_global_lock(dev, state);
10726                 if (ret) {
10727                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
10728                         goto fail;
10729                 }
10730
10731                 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
10732                 if (ret) {
10733                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
10734                         ret = -EINVAL;
10735                         goto fail;
10736                 }
10737
10738                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10739                 if (ret) {
10740                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
10741                         goto fail;
10742                 }
10743
10744                 /*
10745                  * Perform validation of MST topology in the state:
10746                  * We need to perform MST atomic check before calling
10747                  * dc_validate_global_state(), or there is a chance
10748                  * to get stuck in an infinite loop and hang eventually.
10749                  */
10750                 ret = drm_dp_mst_atomic_check(state);
10751                 if (ret) {
10752                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
10753                         goto fail;
10754                 }
10755                 status = dc_validate_global_state(dc, dm_state->context, false);
10756                 if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
					 dc_status_to_str(status), status);
10759                         ret = -EINVAL;
10760                         goto fail;
10761                 }
10762         } else {
10763                 /*
10764                  * The commit is a fast update. Fast updates shouldn't change
10765                  * the DC context, affect global validation, and can have their
10766                  * commit work done in parallel with other commits not touching
10767                  * the same resource. If we have a new DC context as part of
10768                  * the DM atomic state from validation we need to free it and
10769                  * retain the existing one instead.
10770                  *
10771                  * Furthermore, since the DM atomic state only contains the DC
10772                  * context and can safely be annulled, we can free the state
10773                  * and clear the associated private object now to free
10774                  * some memory and avoid a possible use-after-free later.
10775                  */
10776
10777                 for (i = 0; i < state->num_private_objs; i++) {
10778                         struct drm_private_obj *obj = state->private_objs[i].ptr;
10779
10780                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
10781                                 int j = state->num_private_objs-1;
10782
10783                                 dm_atomic_destroy_state(obj,
10784                                                 state->private_objs[i].state);
10785
10786                                 /* If i is not at the end of the array then the
10787                                  * last element needs to be moved to where i was
10788                                  * before the array can safely be truncated.
10789                                  */
10790                                 if (i != j)
10791                                         state->private_objs[i] =
10792                                                 state->private_objs[j];
10793
10794                                 state->private_objs[j].ptr = NULL;
10795                                 state->private_objs[j].state = NULL;
10796                                 state->private_objs[j].old_state = NULL;
10797                                 state->private_objs[j].new_state = NULL;
10798
10799                                 state->num_private_objs = j;
10800                                 break;
10801                         }
10802                 }
10803         }
10804
10805         /* Store the overall update type for use later in atomic check. */
10806         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10807                 struct dm_crtc_state *dm_new_crtc_state =
10808                         to_dm_crtc_state(new_crtc_state);
10809
10810                 /*
10811                  * Only allow async flips for fast updates that don't change
10812                  * the FB pitch, the DCC state, rotation, etc.
10813                  */
10814                 if (new_crtc_state->async_flip && lock_and_validation_needed) {
10815                         drm_dbg_atomic(crtc->dev,
10816                                        "[CRTC:%d:%s] async flips are only supported for fast updates\n",
10817                                        crtc->base.id, crtc->name);
10818                         ret = -EINVAL;
10819                         goto fail;
10820                 }
10821
10822                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10823                         UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
10824         }
10825
10826         /* Must be success */
10827         WARN_ON(ret);
10828
10829         trace_amdgpu_dm_atomic_check_finish(state, ret);
10830
10831         return ret;
10832
10833 fail:
10834         if (ret == -EDEADLK)
10835                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10836         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10837                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10838         else
10839                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10840
10841         trace_amdgpu_dm_atomic_check_finish(state, ret);
10842
10843         return ret;
10844 }
10845
10846 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10847                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
10848 {
10849         u8 dpcd_data;
10850         bool capable = false;
10851
	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(NULL,
				    amdgpu_dm_connector->dc_link,
				    DP_DOWN_STREAM_PORT_COUNT,
				    &dpcd_data,
				    sizeof(dpcd_data)))
		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
10861
10862         return capable;
10863 }
10864
10865 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10866                 unsigned int offset,
10867                 unsigned int total_length,
10868                 u8 *data,
10869                 unsigned int length,
10870                 struct amdgpu_hdmi_vsdb_info *vsdb)
10871 {
10872         bool res;
10873         union dmub_rb_cmd cmd;
10874         struct dmub_cmd_send_edid_cea *input;
10875         struct dmub_cmd_edid_cea_output *output;
10876
10877         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10878                 return false;
10879
10880         memset(&cmd, 0, sizeof(cmd));
10881
10882         input = &cmd.edid_cea.data.input;
10883
10884         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10885         cmd.edid_cea.header.sub_type = 0;
10886         cmd.edid_cea.header.payload_bytes =
10887                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10888         input->offset = offset;
10889         input->length = length;
10890         input->cea_total_length = total_length;
10891         memcpy(input->payload, data, length);
10892
10893         res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
10894         if (!res) {
10895                 DRM_ERROR("EDID CEA parser failed\n");
10896                 return false;
10897         }
10898
10899         output = &cmd.edid_cea.data.output;
10900
10901         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10902                 if (!output->ack.success) {
10903                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
10904                                         output->ack.offset);
10905                 }
10906         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10907                 if (!output->amd_vsdb.vsdb_found)
10908                         return false;
10909
10910                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10911                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10912                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10913                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10914         } else {
10915                 DRM_WARN("Unknown EDID CEA parser results\n");
10916                 return false;
10917         }
10918
10919         return true;
10920 }
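
/*
 * The helper above is an instance of the generic DMUB round trip: zero a
 * union dmub_rb_cmd, fill the header (type/sub_type/payload_bytes, the
 * latter sized to the command minus its header), fill the input payload,
 * then execute with DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY so the firmware's
 * reply is copied back into the same buffer. Condensed sketch:
 *
 *	union dmub_rb_cmd cmd = {0};
 *
 *	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
 *	cmd.edid_cea.header.payload_bytes =
 *		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
 *	// ... fill cmd.edid_cea.data.input ...
 *	if (dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd,
 *					 DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
 *		// ... consume cmd.edid_cea.data.output ...
 */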
10921
10922 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10923                 u8 *edid_ext, int len,
10924                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10925 {
10926         int i;
10927
10928         /* send extension block to DMCU for parsing */
10929         for (i = 0; i < len; i += 8) {
10930                 bool res;
10931                 int offset;
10932
		/* send 8 bytes at a time */
10934                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10935                         return false;
10936
		if (i + 8 == len) {
			/* entire EDID block sent; expect the parse result */
10939                         int version, min_rate, max_rate;
10940
10941                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10942                         if (res) {
10943                                 /* amd vsdb found */
10944                                 vsdb_info->freesync_supported = 1;
10945                                 vsdb_info->amd_vsdb_version = version;
10946                                 vsdb_info->min_refresh_rate_hz = min_rate;
10947                                 vsdb_info->max_refresh_rate_hz = max_rate;
10948                                 return true;
10949                         }
10950                         /* not amd vsdb */
10951                         return false;
10952                 }
10953
		/* check for ack */
10955                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10956                 if (!res)
10957                         return false;
10958         }
10959
10960         return false;
10961 }
10962
10963 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10964                 u8 *edid_ext, int len,
10965                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10966 {
10967         int i;
10968
	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
10972                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10973                         return false;
10974         }
10975
10976         return vsdb_info->freesync_supported;
10977 }
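
/*
 * Sketch of the chunking both parsers above rely on, assuming the chunk
 * size (8 bytes here) divides the block length, as it does for the
 * 128-byte EDID_LENGTH blocks passed in:
 *
 *	for (i = 0; i < 128; i += 8)
 *		send(&edid_ext[i], 8);	// offsets 0, 8, ..., 120
 *
 * The firmware acks the intermediate chunks and only reports the AMD VSDB
 * result once the final chunk (offset + length == total_length) arrives.
 */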
10978
10979 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10980                 u8 *edid_ext, int len,
10981                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10982 {
10983         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10984         bool ret;
10985
10986         mutex_lock(&adev->dm.dc_lock);
10987         if (adev->dm.dmub_srv)
10988                 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10989         else
10990                 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10991         mutex_unlock(&adev->dm.dc_lock);
10992         return ret;
10993 }
10994
10995 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10996                           struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10997 {
10998         u8 *edid_ext = NULL;
10999         int i;
11000         int j = 0;
11001
11002         if (edid == NULL || edid->extensions == 0)
11003                 return -ENODEV;
11004
	/* Find the DisplayID extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (void *)(edid + (i + 1));
		if (edid_ext[0] == DISPLAYID_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/* Don't read a partial amd_vsdb_block past the end of the 128-byte block */
	while (j + sizeof(struct amd_vsdb_block) <= EDID_LENGTH) {
		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) |
				      (amd_vsdb->ieee_id[1] << 8) |
				      amd_vsdb->ieee_id[0];
11015
11016                 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
11017                                 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
			vsdb_info->replay_mode = !!(amd_vsdb->feature_caps &
						    AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE);
11019                         vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
11020                         DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
11021
11022                         return true;
11023                 }
11024                 j++;
11025         }
11026
11027         return false;
11028 }
11029
11030 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11031                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11032 {
11033         u8 *edid_ext = NULL;
11034         int i;
11035         bool valid_vsdb_found = false;
11036
11037         /*----- drm_find_cea_extension() -----*/
11038         /* No EDID or EDID extensions */
11039         if (edid == NULL || edid->extensions == 0)
11040                 return -ENODEV;
11041
11042         /* Find CEA extension */
11043         for (i = 0; i < edid->extensions; i++) {
11044                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11045                 if (edid_ext[0] == CEA_EXT)
11046                         break;
11047         }
11048
11049         if (i == edid->extensions)
11050                 return -ENODEV;
11051
11052         /*----- cea_db_offsets() -----*/
11053         if (edid_ext[0] != CEA_EXT)
11054                 return -ENODEV;
11055
11056         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11057
11058         return valid_vsdb_found ? i : -ENODEV;
11059 }
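
/*
 * EDID extension blocks are laid out back to back after the 128-byte base
 * block, which is what the addressing above relies on (EDID_LENGTH == 128):
 *
 *	u8 *ext = (u8 *)edid + EDID_LENGTH * (i + 1);	// extension i
 *
 *	// ext[0] is the extension tag:
 *	//   CEA_EXT       (0x02) - CEA-861 extension
 *	//   DISPLAYID_EXT (0x70) - DisplayID extension
 */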
11060
11061 /**
11062  * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
11063  *
11064  * @connector: Connector to query.
11065  * @edid: EDID from monitor
11066  *
 * Amdgpu supports FreeSync on both DP and HDMI displays, and it needs to keep
 * track of some of the display information in the internal data struct used by
 * amdgpu_dm. This function checks which type of connector it needs to set the
 * FreeSync parameters for.
11071  */
11072 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11073                                     struct edid *edid)
11074 {
11075         int i = 0;
11076         struct detailed_timing *timing;
11077         struct detailed_non_pixel *data;
11078         struct detailed_data_monitor_range *range;
11079         struct amdgpu_dm_connector *amdgpu_dm_connector =
11080                         to_amdgpu_dm_connector(connector);
11081         struct dm_connector_state *dm_con_state = NULL;
11082         struct dc_sink *sink;
11083
11084         struct amdgpu_device *adev = drm_to_adev(connector->dev);
11085         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11086         bool freesync_capable = false;
11087         enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
11088
11089         if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
11091                 goto update;
11092         }
11093
11094         sink = amdgpu_dm_connector->dc_sink ?
11095                 amdgpu_dm_connector->dc_sink :
11096                 amdgpu_dm_connector->dc_em_sink;
11097
11098         if (!edid || !sink) {
11099                 dm_con_state = to_dm_connector_state(connector->state);
11100
11101                 amdgpu_dm_connector->min_vfreq = 0;
11102                 amdgpu_dm_connector->max_vfreq = 0;
11103                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11104                 connector->display_info.monitor_range.min_vfreq = 0;
11105                 connector->display_info.monitor_range.max_vfreq = 0;
11106                 freesync_capable = false;
11107
11108                 goto update;
11109         }
11110
11111         dm_con_state = to_dm_connector_state(connector->state);
11112
11113         if (!adev->dm.freesync_module)
11114                 goto update;
11115
11116         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11117                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11118                 bool edid_check_required = false;
11119
11120                 if (edid) {
11121                         edid_check_required = is_dp_capable_without_timing_msa(
11122                                                 adev->dm.dc,
11123                                                 amdgpu_dm_connector);
11124                 }
11125
		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
11130                                 timing  = &edid->detailed_timings[i];
11131                                 data    = &timing->data.other_data;
11132                                 range   = &data->data.range;
11133                                 /*
11134                                  * Check if monitor has continuous frequency mode
11135                                  */
11136                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11137                                         continue;
11138                                 /*
11139                                  * Check for flag range limits only. If flag == 1 then
11140                                  * no additional timing information provided.
11141                                  * Default GTF, GTF Secondary curve and CVT are not
11142                                  * supported
11143                                  */
11144                                 if (range->flags != 1)
11145                                         continue;
11146
11147                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11148                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11149                                 amdgpu_dm_connector->pixel_clock_mhz =
11150                                         range->pixel_clock_mhz * 10;
11151
11152                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11153                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11154
11155                                 break;
11156                         }
11157
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
11163                 }
11164                 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11165
11166                 if (vsdb_info.replay_mode) {
11167                         amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
11168                         amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
11169                         amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
11170                 }
11171
11172         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11173                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11174                 if (i >= 0 && vsdb_info.freesync_supported) {
11175                         timing  = &edid->detailed_timings[i];
11176                         data    = &timing->data.other_data;
11177
11178                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11179                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11180                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11181                                 freesync_capable = true;
11182
11183                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11184                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11185                 }
11186         }
11187
11188         as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
11189
11190         if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
11191                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11192                 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
11194                         amdgpu_dm_connector->pack_sdp_v1_3 = true;
11195                         amdgpu_dm_connector->as_type = as_type;
11196                         amdgpu_dm_connector->vsdb_info = vsdb_info;
11197
11198                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11199                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11200                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11201                                 freesync_capable = true;
11202
11203                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11204                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11205                 }
11206         }
11207
11208 update:
11209         if (dm_con_state)
11210                 dm_con_state->freesync_capable = freesync_capable;
11211
11212         if (connector->vrr_capable_property)
11213                 drm_connector_set_vrr_capable_property(connector,
11214                                                        freesync_capable);
11215 }
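
/*
 * Worked example of the capability rule above (values assumed): a panel
 * advertising a 48-144 Hz range gives max_vfreq - min_vfreq == 96 > 10 and
 * is marked FreeSync capable; a fixed 60 Hz panel gives 0 and is not.
 */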
11216
11217 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11218 {
11219         struct amdgpu_device *adev = drm_to_adev(dev);
11220         struct dc *dc = adev->dm.dc;
11221         int i;
11222
11223         mutex_lock(&adev->dm.dc_lock);
11224         if (dc->current_state) {
11225                 for (i = 0; i < dc->current_state->stream_count; ++i)
11226                         dc->current_state->streams[i]
11227                                 ->triggered_crtc_reset.enabled =
11228                                 adev->dm.force_timing_sync;
11229
11230                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11231                 dc_trigger_sync(dc, dc->current_state);
11232         }
11233         mutex_unlock(&adev->dm.dc_lock);
11234 }
11235
11236 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11237                        u32 value, const char *func_name)
11238 {
11239 #ifdef DM_CHECK_ADDR_0
11240         if (address == 0) {
11241                 drm_err(adev_to_drm(ctx->driver_context),
			"invalid register write. address = 0\n");
11243                 return;
11244         }
11245 #endif
11246         cgs_write_register(ctx->cgs_device, address, value);
11247         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11248 }
11249
11250 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11251                           const char *func_name)
11252 {
11253         u32 value;
11254 #ifdef DM_CHECK_ADDR_0
11255         if (address == 0) {
11256                 drm_err(adev_to_drm(ctx->driver_context),
11257                         "invalid register read; address = 0\n");
11258                 return 0;
11259         }
11260 #endif
11261
11262         if (ctx->dmub_srv &&
11263             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11264             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11265                 ASSERT(false);
11266                 return 0;
11267         }
11268
11269         value = cgs_read_register(ctx->cgs_device, address);
11270
11271         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11272
11273         return value;
11274 }
11275
11276 int amdgpu_dm_process_dmub_aux_transfer_sync(
11277                 struct dc_context *ctx,
11278                 unsigned int link_index,
11279                 struct aux_payload *payload,
11280                 enum aux_return_code_type *operation_result)
11281 {
11282         struct amdgpu_device *adev = ctx->driver_context;
11283         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11284         int ret = -1;
11285
11286         mutex_lock(&adev->dm.dpia_aux_lock);
11287         if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
11288                 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11289                 goto out;
11290         }
11291
11292         if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
11294                 *operation_result = AUX_RET_ERROR_TIMEOUT;
11295                 goto out;
11296         }
11297
11298         if (p_notify->result != AUX_RET_SUCCESS) {
11299                 /*
11300                  * Transient states before tunneling is enabled could
11301                  * lead to this error. We can ignore this for now.
11302                  */
11303                 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
11304                         DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
11305                                         payload->address, payload->length,
11306                                         p_notify->result);
11307                 }
11308                 *operation_result = AUX_RET_ERROR_INVALID_REPLY;
11309                 goto out;
11310         }
11313         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
	if (!payload->write && p_notify->aux_reply.length &&
	    (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
		if (payload->length != p_notify->aux_reply.length) {
			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
				 p_notify->aux_reply.length,
				 payload->address, payload->length);
11321                         *operation_result = AUX_RET_ERROR_INVALID_REPLY;
11322                         goto out;
11323                 }
11324
11325                 memcpy(payload->data, p_notify->aux_reply.data,
11326                                 p_notify->aux_reply.length);
11327         }
11328
11329         /* success */
11330         ret = p_notify->aux_reply.length;
11331         *operation_result = p_notify->result;
11332 out:
11333         reinit_completion(&adev->dm.dmub_aux_transfer_done);
11334         mutex_unlock(&adev->dm.dpia_aux_lock);
11335         return ret;
11336 }
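
/*
 * Minimal usage sketch for the helper above (caller shape assumed, not
 * taken from this file):
 *
 *	enum aux_return_code_type op_result;
 *	int ret;
 *
 *	ret = amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link_index,
 *						       &payload, &op_result);
 *	if (ret < 0)
 *		// map op_result (AUX_RET_ERROR_*) to a DRM AUX error
 *	else
 *		// ret is the reply length; payload->data holds read bytes
 */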
11337
11338 int amdgpu_dm_process_dmub_set_config_sync(
11339                 struct dc_context *ctx,
11340                 unsigned int link_index,
11341                 struct set_config_cmd_payload *payload,
11342                 enum set_config_status *operation_result)
11343 {
11344         struct amdgpu_device *adev = ctx->driver_context;
11345         bool is_cmd_complete;
11346         int ret;
11347
11348         mutex_lock(&adev->dm.dpia_aux_lock);
11349         is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
11350                         link_index, payload, adev->dm.dmub_notify);
11351
11352         if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
11353                 ret = 0;
11354                 *operation_result = adev->dm.dmub_notify->sc_status;
11355         } else {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
11357                 ret = -1;
11358                 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11359         }
11360
11361         if (!is_cmd_complete)
11362                 reinit_completion(&adev->dm.dmub_aux_transfer_done);
11363         mutex_unlock(&adev->dm.dpia_aux_lock);
11364         return ret;
11365 }
11366
11367 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
11368 {
11369         return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
11370 }
11371
11372 bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
11373 {
11374         return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
11375 }