1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "link_enc_cfg.h"
33 #include "dc/inc/core_types.h"
34 #include "dal_asic_id.h"
35 #include "dmub/dmub_srv.h"
36 #include "dc/inc/hw/dmcu.h"
37 #include "dc/inc/hw/abm.h"
38 #include "dc/dc_dmub_srv.h"
39 #include "dc/dc_edid_parser.h"
40 #include "dc/dc_stat.h"
41 #include "amdgpu_dm_trace.h"
42
43 #include "vid.h"
44 #include "amdgpu.h"
45 #include "amdgpu_display.h"
46 #include "amdgpu_ucode.h"
47 #include "atom.h"
48 #include "amdgpu_dm.h"
49 #ifdef CONFIG_DRM_AMD_DC_HDCP
50 #include "amdgpu_dm_hdcp.h"
51 #include <drm/display/drm_hdcp_helper.h>
52 #endif
53 #include "amdgpu_pm.h"
54 #include "amdgpu_atombios.h"
55
56 #include "amd_shared.h"
57 #include "amdgpu_dm_irq.h"
58 #include "dm_helpers.h"
59 #include "amdgpu_dm_mst_types.h"
60 #if defined(CONFIG_DEBUG_FS)
61 #include "amdgpu_dm_debugfs.h"
62 #endif
63 #include "amdgpu_dm_psr.h"
64
65 #include "ivsrcid/ivsrcid_vislands30.h"
66
67 #include "i2caux_interface.h"
68 #include <linux/module.h>
69 #include <linux/moduleparam.h>
70 #include <linux/types.h>
71 #include <linux/pm_runtime.h>
72 #include <linux/pci.h>
73 #include <linux/firmware.h>
74 #include <linux/component.h>
75
76 #include <drm/display/drm_dp_mst_helper.h>
77 #include <drm/display/drm_hdmi_helper.h>
78 #include <drm/drm_atomic.h>
79 #include <drm/drm_atomic_uapi.h>
80 #include <drm/drm_atomic_helper.h>
81 #include <drm/drm_fb_helper.h>
82 #include <drm/drm_fourcc.h>
83 #include <drm/drm_edid.h>
84 #include <drm/drm_vblank.h>
85 #include <drm/drm_audio_component.h>
86
87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88
89 #include "dcn/dcn_1_0_offset.h"
90 #include "dcn/dcn_1_0_sh_mask.h"
91 #include "soc15_hw_ip.h"
92 #include "vega10_ip_offset.h"
93
94 #include "soc15_common.h"
95
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
118 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
120
121 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
122 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
123
124 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
125 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
126
127 /* Number of bytes in PSP header for firmware. */
128 #define PSP_HEADER_BYTES 0x100
129
130 /* Number of bytes in PSP footer for firmware. */
131 #define PSP_FOOTER_BYTES 0x100
132
133 /**
134  * DOC: overview
135  *
136  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
137  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
138  * requests into DC requests, and DC responses into DRM responses.
139  *
140  * The root control structure is &struct amdgpu_display_manager.
141  */
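/*
 * Rough request flow (an illustrative sketch, not exhaustive): a DRM atomic
 * commit enters through amdgpu_dm_atomic_check() and
 * amdgpu_dm_atomic_commit_tail() below, which translate the new
 * drm_atomic_state into DC stream/plane state and hand it to the DC core
 * for hardware programming.
 */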
142
143 /* basic init/fini API */
144 static int amdgpu_dm_init(struct amdgpu_device *adev);
145 static void amdgpu_dm_fini(struct amdgpu_device *adev);
146 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
147
148 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
149 {
150         switch (link->dpcd_caps.dongle_type) {
151         case DISPLAY_DONGLE_NONE:
152                 return DRM_MODE_SUBCONNECTOR_Native;
153         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
154                 return DRM_MODE_SUBCONNECTOR_VGA;
155         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
156         case DISPLAY_DONGLE_DP_DVI_DONGLE:
157                 return DRM_MODE_SUBCONNECTOR_DVID;
158         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
159         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
160                 return DRM_MODE_SUBCONNECTOR_HDMIA;
161         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
162         default:
163                 return DRM_MODE_SUBCONNECTOR_Unknown;
164         }
165 }
166
167 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
168 {
169         struct dc_link *link = aconnector->dc_link;
170         struct drm_connector *connector = &aconnector->base;
171         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
172
173         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
174                 return;
175
176         if (aconnector->dc_sink)
177                 subconnector = get_subconnector_type(link);
178
179         drm_object_property_set_value(&connector->base,
180                         connector->dev->mode_config.dp_subconnector_property,
181                         subconnector);
182 }
183
184 /*
185  * initializes drm_device display related structures, based on the information
186  * provided by DAL. The drm structures are: drm_crtc, drm_connector,
187  * drm_encoder, drm_mode_config
188  *
189  * Returns 0 on success
190  */
191 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
192 /* removes and deallocates the drm structures, created by the above function */
193 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
194
195 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
196                                 struct drm_plane *plane,
197                                 unsigned long possible_crtcs,
198                                 const struct dc_plane_cap *plane_cap);
199 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
200                                struct drm_plane *plane,
201                                uint32_t link_index);
202 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
203                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
204                                     uint32_t link_index,
205                                     struct amdgpu_encoder *amdgpu_encoder);
206 static int amdgpu_dm_encoder_init(struct drm_device *dev,
207                                   struct amdgpu_encoder *aencoder,
208                                   uint32_t link_index);
209
210 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
211
212 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
213
214 static int amdgpu_dm_atomic_check(struct drm_device *dev,
215                                   struct drm_atomic_state *state);
216
217 static void handle_cursor_update(struct drm_plane *plane,
218                                  struct drm_plane_state *old_plane_state);
219
220 static const struct drm_format_info *
221 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
222
223 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
224 static void handle_hpd_rx_irq(void *param);
225
226 static bool
227 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
228                                  struct drm_crtc_state *new_crtc_state);
229 /*
230  * dm_vblank_get_counter
231  *
232  * @brief
233  * Get counter for number of vertical blanks
234  *
235  * @param
236  * struct amdgpu_device *adev - [in] desired amdgpu device
237  * int crtc - [in] which CRTC to get the counter from
238  *
239  * @return
240  * Counter for vertical blanks
241  */
242 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
243 {
244         if (crtc >= adev->mode_info.num_crtc)
245                 return 0;
246         else {
247                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
248
249                 if (acrtc->dm_irq_params.stream == NULL) {
250                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
251                                   crtc);
252                         return 0;
253                 }
254
255                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
256         }
257 }
258
259 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
260                                   u32 *vbl, u32 *position)
261 {
262         uint32_t v_blank_start, v_blank_end, h_position, v_position;
263
264         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
265                 return -EINVAL;
266         else {
267                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
268
269                 if (acrtc->dm_irq_params.stream == NULL) {
270                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
271                                   crtc);
272                         return 0;
273                 }
274
275                 /*
276                  * TODO rework base driver to use values directly.
277                  * for now parse it back into reg-format
278                  */
279                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
280                                          &v_blank_start,
281                                          &v_blank_end,
282                                          &h_position,
283                                          &v_position);
284
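                /* Pack into the legacy register layout: *position carries
                 * v_position in the low 16 bits and h_position in the high
                 * 16 bits; *vbl likewise packs v_blank_start (low) and
                 * v_blank_end (high).
                 */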
285                 *position = v_position | (h_position << 16);
286                 *vbl = v_blank_start | (v_blank_end << 16);
287         }
288
289         return 0;
290 }
291
292 static bool dm_is_idle(void *handle)
293 {
294         /* XXX todo */
295         return true;
296 }
297
298 static int dm_wait_for_idle(void *handle)
299 {
300         /* XXX todo */
301         return 0;
302 }
303
304 static bool dm_check_soft_reset(void *handle)
305 {
306         return false;
307 }
308
309 static int dm_soft_reset(void *handle)
310 {
311         /* XXX todo */
312         return 0;
313 }
314
315 static struct amdgpu_crtc *
316 get_crtc_by_otg_inst(struct amdgpu_device *adev,
317                      int otg_inst)
318 {
319         struct drm_device *dev = adev_to_drm(adev);
320         struct drm_crtc *crtc;
321         struct amdgpu_crtc *amdgpu_crtc;
322
323         if (WARN_ON(otg_inst == -1))
324                 return adev->mode_info.crtcs[0];
325
326         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
327                 amdgpu_crtc = to_amdgpu_crtc(crtc);
328
329                 if (amdgpu_crtc->otg_inst == otg_inst)
330                         return amdgpu_crtc;
331         }
332
333         return NULL;
334 }
335
336 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
337 {
338         return acrtc->dm_irq_params.freesync_config.state ==
339                        VRR_STATE_ACTIVE_VARIABLE ||
340                acrtc->dm_irq_params.freesync_config.state ==
341                        VRR_STATE_ACTIVE_FIXED;
342 }
343
344 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
345 {
346         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
347                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
348 }
349
350 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
351                                               struct dm_crtc_state *new_state)
352 {
353         if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
354                 return true;
355         else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
356                 return true;
357         else
358                 return false;
359 }
360
361 /**
362  * dm_pflip_high_irq() - Handle pageflip interrupt
363  * @interrupt_params: interrupt parameters, used to look up the flipped CRTC
364  *
365  * Handles the pageflip interrupt by notifying all interested parties
366  * that the pageflip has been completed.
367  */
368 static void dm_pflip_high_irq(void *interrupt_params)
369 {
370         struct amdgpu_crtc *amdgpu_crtc;
371         struct common_irq_params *irq_params = interrupt_params;
372         struct amdgpu_device *adev = irq_params->adev;
373         unsigned long flags;
374         struct drm_pending_vblank_event *e;
375         uint32_t vpos, hpos, v_blank_start, v_blank_end;
376         bool vrr_active;
377
378         amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
379
380         /* IRQ could occur when in initial stage */
381         /* TODO work and BO cleanup */
382         if (amdgpu_crtc == NULL) {
383                 DC_LOG_PFLIP("CRTC is null, returning.\n");
384                 return;
385         }
386
387         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
388
389         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
390                 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
391                                                  amdgpu_crtc->pflip_status,
392                                                  AMDGPU_FLIP_SUBMITTED,
393                                                  amdgpu_crtc->crtc_id,
394                                                  amdgpu_crtc);
395                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
396                 return;
397         }
398
399         /* page flip completed. */
400         e = amdgpu_crtc->event;
401         amdgpu_crtc->event = NULL;
402
403         WARN_ON(!e);
404
405         vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
406
407         /* Fixed refresh rate, or VRR scanout position outside front-porch? */
408         if (!vrr_active ||
409             !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
410                                       &v_blank_end, &hpos, &vpos) ||
411             (vpos < v_blank_start)) {
412                 /* Update to correct count and vblank timestamp if racing with
413                  * vblank irq. This also updates to the correct vblank timestamp
414                  * even in VRR mode, as scanout is past the front-porch atm.
415                  */
416                 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
417
418                 /* Wake up userspace by sending the pageflip event with proper
419                  * count and timestamp of vblank of flip completion.
420                  */
421                 if (e) {
422                         drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
423
424                         /* Event sent, so done with vblank for this flip */
425                         drm_crtc_vblank_put(&amdgpu_crtc->base);
426                 }
427         } else if (e) {
428                 /* VRR active and inside front-porch: vblank count and
429                  * timestamp for pageflip event will only be up to date after
430                  * drm_crtc_handle_vblank() has been executed from late vblank
431                  * irq handler after start of back-porch (vline 0). We queue the
432                  * pageflip event for send-out by drm_crtc_handle_vblank() with
433                  * updated timestamp and count, once it runs after us.
434                  *
435                  * We need to open-code this instead of using the helper
436                  * drm_crtc_arm_vblank_event(), as that helper would
437                  * call drm_crtc_accurate_vblank_count(), which we must
438                  * not call in VRR mode while we are in front-porch!
439                  */
440
441                 /* sequence will be replaced by real count during send-out. */
442                 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
443                 e->pipe = amdgpu_crtc->crtc_id;
444
445                 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
446                 e = NULL;
447         }
448
449         /* Keep track of vblank of this flip for flip throttling. We use the
450          * cooked hw counter, as that one is incremented at start of this vblank
451          * of pageflip completion, so last_flip_vblank is the forbidden count
452          * for queueing new pageflips if vsync + VRR is enabled.
453          */
454         amdgpu_crtc->dm_irq_params.last_flip_vblank =
455                 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
456
457         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
458         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
459
460         DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
461                      amdgpu_crtc->crtc_id, amdgpu_crtc,
462                      vrr_active, (int) !e);
463 }
464
465 static void dm_vupdate_high_irq(void *interrupt_params)
466 {
467         struct common_irq_params *irq_params = interrupt_params;
468         struct amdgpu_device *adev = irq_params->adev;
469         struct amdgpu_crtc *acrtc;
470         struct drm_device *drm_dev;
471         struct drm_vblank_crtc *vblank;
472         ktime_t frame_duration_ns, previous_timestamp;
473         unsigned long flags;
474         int vrr_active;
475
476         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
477
478         if (acrtc) {
479                 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
480                 drm_dev = acrtc->base.dev;
481                 vblank = &drm_dev->vblank[acrtc->base.index];
482                 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
483                 frame_duration_ns = vblank->time - previous_timestamp;
484
485                 if (frame_duration_ns > 0) {
486                         trace_amdgpu_refresh_rate_track(acrtc->base.index,
487                                                 frame_duration_ns,
488                                                 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
489                         atomic64_set(&irq_params->previous_timestamp, vblank->time);
490                 }
491
492                 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
493                               acrtc->crtc_id,
494                               vrr_active);
495
496                 /* Core vblank handling is done here after end of front-porch in
497                  * vrr mode, as vblank timestamping only gives valid results
498                  * once scanout has passed the front-porch. This also delivers
499                  * any page-flip completion events that were queued to us
500                  * if a pageflip happened inside front-porch.
501                  */
502                 if (vrr_active) {
503                         drm_crtc_handle_vblank(&acrtc->base);
504
505                         /* BTR processing for pre-DCE12 ASICs */
506                         if (acrtc->dm_irq_params.stream &&
507                             adev->family < AMDGPU_FAMILY_AI) {
508                                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
509                                 mod_freesync_handle_v_update(
510                                     adev->dm.freesync_module,
511                                     acrtc->dm_irq_params.stream,
512                                     &acrtc->dm_irq_params.vrr_params);
513
514                                 dc_stream_adjust_vmin_vmax(
515                                     adev->dm.dc,
516                                     acrtc->dm_irq_params.stream,
517                                     &acrtc->dm_irq_params.vrr_params.adjust);
518                                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
519                         }
520                 }
521         }
522 }
523
524 /**
525  * dm_crtc_high_irq() - Handles CRTC interrupt
526  * @interrupt_params: used for determining the CRTC instance
527  *
528  * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
529  * event handler.
530  */
531 static void dm_crtc_high_irq(void *interrupt_params)
532 {
533         struct common_irq_params *irq_params = interrupt_params;
534         struct amdgpu_device *adev = irq_params->adev;
535         struct amdgpu_crtc *acrtc;
536         unsigned long flags;
537         int vrr_active;
538
539         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
540         if (!acrtc)
541                 return;
542
543         vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
544
545         DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
546                       vrr_active, acrtc->dm_irq_params.active_planes);
547
548         /*
549          * Core vblank handling at start of front-porch is only possible
550          * in non-vrr mode, as only then does vblank timestamping give
551          * valid results while done in front-porch. Otherwise defer it
552          * to dm_vupdate_high_irq after end of front-porch.
553          */
554         if (!vrr_active)
555                 drm_crtc_handle_vblank(&acrtc->base);
556
557         /*
558          * The following must happen at start of vblank, for crc
559          * computation and below-the-range btr support in vrr mode.
560          */
561         amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
562
563         /* BTR updates need to happen before VUPDATE on Vega and above. */
564         if (adev->family < AMDGPU_FAMILY_AI)
565                 return;
566
567         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
568
569         if (acrtc->dm_irq_params.stream &&
570             acrtc->dm_irq_params.vrr_params.supported &&
571             acrtc->dm_irq_params.freesync_config.state ==
572                     VRR_STATE_ACTIVE_VARIABLE) {
573                 mod_freesync_handle_v_update(adev->dm.freesync_module,
574                                              acrtc->dm_irq_params.stream,
575                                              &acrtc->dm_irq_params.vrr_params);
576
577                 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
578                                            &acrtc->dm_irq_params.vrr_params.adjust);
579         }
580
581         /*
582          * If there aren't any active_planes then DCH HUBP may be clock-gated.
583          * In that case, pageflip completion interrupts won't fire and pageflip
584          * completion events won't get delivered. Prevent this by sending
585          * pending pageflip events from here if a flip is still pending.
586          *
587          * If any planes are enabled, use dm_pflip_high_irq() instead, to
588          * avoid race conditions between flip programming and completion,
589          * which could cause too early flip completion events.
590          */
591         if (adev->family >= AMDGPU_FAMILY_RV &&
592             acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
593             acrtc->dm_irq_params.active_planes == 0) {
594                 if (acrtc->event) {
595                         drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
596                         acrtc->event = NULL;
597                         drm_crtc_vblank_put(&acrtc->base);
598                 }
599                 acrtc->pflip_status = AMDGPU_FLIP_NONE;
600         }
601
602         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
603 }
604
605 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
606 /**
607  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
608  * DCN generation ASICs
609  * @interrupt_params: interrupt parameters
610  *
611  * Used to set crc window/read out crc value at vertical line 0 position
612  */
613 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
614 {
615         struct common_irq_params *irq_params = interrupt_params;
616         struct amdgpu_device *adev = irq_params->adev;
617         struct amdgpu_crtc *acrtc;
618
619         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
620
621         if (!acrtc)
622                 return;
623
624         amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
625 }
626 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
627
628 /**
629  * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
630  * @adev: amdgpu_device pointer
631  * @notify: dmub notification structure
632  *
633  * DMUB AUX or SET_CONFIG command completion processing callback.
634  * Copies the dmub notification to DM, to be read by the AUX command
635  * issuing thread, and also signals the event to wake up that thread.
636  */
637 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
638                                         struct dmub_notification *notify)
639 {
640         if (adev->dm.dmub_notify)
641                 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
642         if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
643                 complete(&adev->dm.dmub_aux_transfer_done);
644 }
645
646 /**
647  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
648  * @adev: amdgpu_device pointer
649  * @notify: dmub notification structure
650  *
651  * DMUB HPD interrupt processing callback. Gets the display index through
652  * the link index and calls a helper to do the processing.
653  */
654 static void dmub_hpd_callback(struct amdgpu_device *adev,
655                               struct dmub_notification *notify)
656 {
657         struct amdgpu_dm_connector *aconnector;
658         struct amdgpu_dm_connector *hpd_aconnector = NULL;
659         struct drm_connector *connector;
660         struct drm_connector_list_iter iter;
661         struct dc_link *link;
662         uint8_t link_index = 0;
663         struct drm_device *dev;
664
665         if (adev == NULL)
666                 return;
667
668         if (notify == NULL) {
669                 DRM_ERROR("DMUB HPD callback notification was NULL");
670                 return;
671         }
672
673         if (notify->link_index >= adev->dm.dc->link_count) {
674                 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
675                 return;
676         }
677
678         link_index = notify->link_index;
679         link = adev->dm.dc->links[link_index];
680         dev = adev->dm.ddev;
681
682         drm_connector_list_iter_begin(dev, &iter);
683         drm_for_each_connector_iter(connector, &iter) {
684                 aconnector = to_amdgpu_dm_connector(connector);
685                 if (link && aconnector->dc_link == link) {
686                         DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
687                         hpd_aconnector = aconnector;
688                         break;
689                 }
690         }
691         drm_connector_list_iter_end(&iter);
692
693         if (hpd_aconnector) {
694                 if (notify->type == DMUB_NOTIFICATION_HPD)
695                         handle_hpd_irq_helper(hpd_aconnector);
696                 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
697                         handle_hpd_rx_irq(hpd_aconnector);
698         }
699 }
700
701 /**
702  * register_dmub_notify_callback - Sets callback for DMUB notify
703  * @adev: amdgpu_device pointer
704  * @type: Type of dmub notification
705  * @callback: Dmub interrupt callback function
706  * @dmub_int_thread_offload: offload indicator
707  *
708  * API to register a dmub callback handler for a dmub notification.
709  * Also sets an indicator of whether callback processing is to be
710  * offloaded to the dmub interrupt handling thread.
711  * Return: true if successfully registered, false on invalid type or NULL callback
712  */
713 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
714                                           enum dmub_notification_type type,
715                                           dmub_notify_interrupt_callback_t callback,
716                                           bool dmub_int_thread_offload)
717 {
718         if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
719                 adev->dm.dmub_callback[type] = callback;
720                 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
721         } else
722                 return false;
723
724         return true;
725 }
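/*
 * Typical usage (a sketch mirroring amdgpu_dm_init()): HPD processing is
 * offloaded to a worker thread, while AUX replies are handled directly in
 * interrupt context:
 *
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *                                 dmub_aux_setconfig_callback, false);
 *   register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                 dmub_hpd_callback, true);
 */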
726
727 static void dm_handle_hpd_work(struct work_struct *work)
728 {
729         struct dmub_hpd_work *dmub_hpd_wrk;
730
731         dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
732
733         if (!dmub_hpd_wrk->dmub_notify) {
734                 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
735                 return;
736         }
737
738         if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
739                 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](
740                         dmub_hpd_wrk->adev, dmub_hpd_wrk->dmub_notify);
741         }
742
743         kfree(dmub_hpd_wrk->dmub_notify);
744         kfree(dmub_hpd_wrk);
745
746 }
747
748 #define DMUB_TRACE_MAX_READ 64
749 /**
750  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
751  * @interrupt_params: used for determining the Outbox instance
752  *
753  * Handles the Outbox interrupt by draining pending DMUB notifications
754  * and trace-buffer entries.
755  */
756 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
757 {
758         struct dmub_notification notify;
759         struct common_irq_params *irq_params = interrupt_params;
760         struct amdgpu_device *adev = irq_params->adev;
761         struct amdgpu_display_manager *dm = &adev->dm;
762         struct dmcub_trace_buf_entry entry = { 0 };
763         uint32_t count = 0;
764         struct dmub_hpd_work *dmub_hpd_wrk;
765         struct dc_link *plink = NULL;
766
767         if (dc_enable_dmub_notifications(adev->dm.dc) &&
768                 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
769
770                 do {
771                         dc_stat_get_dmub_notification(adev->dm.dc, &notify);
772                         if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
773                                 DRM_ERROR("DM: notify type %d invalid!", notify.type);
774                                 continue;
775                         }
776                         if (!dm->dmub_callback[notify.type]) {
777                                 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
778                                 continue;
779                         }
780                         if (dm->dmub_thread_offload[notify.type] == true) {
781                                 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
782                                 if (!dmub_hpd_wrk) {
783                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk");
784                                         return;
785                                 }
786                                 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
787                                 if (!dmub_hpd_wrk->dmub_notify) {
788                                         kfree(dmub_hpd_wrk);
789                                         DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
790                                         return;
791                                 }
792                                 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
793                                 memcpy(dmub_hpd_wrk->dmub_notify, &notify,
794                                        sizeof(struct dmub_notification));
795                                 dmub_hpd_wrk->adev = adev;
796                                 if (notify.type == DMUB_NOTIFICATION_HPD) {
797                                         plink = adev->dm.dc->links[notify.link_index];
798                                         if (plink) {
799                                                 plink->hpd_status =
800                                                         notify.hpd_status == DP_HPD_PLUG;
801                                         }
802                                 }
803                                 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
804                         } else {
805                                 dm->dmub_callback[notify.type](adev, &notify);
806                         }
807                 } while (notify.pending_notification);
808         }
809
810
811         do {
812                 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
813                         trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
814                                                         entry.param0, entry.param1);
815
816                         DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
817                                  entry.trace_code, entry.tick_count, entry.param0, entry.param1);
818                 } else
819                         break;
820
821                 count++;
822
823         } while (count <= DMUB_TRACE_MAX_READ);
824
825         if (count > DMUB_TRACE_MAX_READ)
826                 DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
827 }
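/*
 * Note: the handler above first drains all pending DMUB notifications, then
 * reads at most DMUB_TRACE_MAX_READ trace-buffer entries, presumably to
 * bound the work done per interrupt.
 */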
828
829 static int dm_set_clockgating_state(void *handle,
830                   enum amd_clockgating_state state)
831 {
832         return 0;
833 }
834
835 static int dm_set_powergating_state(void *handle,
836                   enum amd_powergating_state state)
837 {
838         return 0;
839 }
840
841 /* Prototypes of private functions */
842 static int dm_early_init(void *handle);
843
844 /* Allocate memory for FBC compressed data */
845 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
846 {
847         struct drm_device *dev = connector->dev;
848         struct amdgpu_device *adev = drm_to_adev(dev);
849         struct dm_compressor_info *compressor = &adev->dm.compressor;
850         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
851         struct drm_display_mode *mode;
852         unsigned long max_size = 0;
853
854         if (adev->dm.dc->fbc_compressor == NULL)
855                 return;
856
857         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
858                 return;
859
860         if (compressor->bo_ptr)
861                 return;
862
863
864         list_for_each_entry(mode, &connector->modes, head) {
865                 if (max_size < mode->htotal * mode->vtotal)
866                         max_size = mode->htotal * mode->vtotal;
867         }
868
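        /* max_size counts pixels of the largest mode; the allocation below
         * assumes up to 4 bytes per pixel (32bpp).
         */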
869         if (max_size) {
870                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
871                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
872                             &compressor->gpu_addr, &compressor->cpu_addr);
873
874                 if (r) {
875                         DRM_ERROR("DM: Failed to initialize FBC\n");
876                 } else {
877                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
878                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
879                 }
880
881         }
882
883 }
884
885 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
886                                           int pipe, bool *enabled,
887                                           unsigned char *buf, int max_bytes)
888 {
889         struct drm_device *dev = dev_get_drvdata(kdev);
890         struct amdgpu_device *adev = drm_to_adev(dev);
891         struct drm_connector *connector;
892         struct drm_connector_list_iter conn_iter;
893         struct amdgpu_dm_connector *aconnector;
894         int ret = 0;
895
896         *enabled = false;
897
898         mutex_lock(&adev->dm.audio_lock);
899
900         drm_connector_list_iter_begin(dev, &conn_iter);
901         drm_for_each_connector_iter(connector, &conn_iter) {
902                 aconnector = to_amdgpu_dm_connector(connector);
903                 if (aconnector->audio_inst != port)
904                         continue;
905
906                 *enabled = true;
907                 ret = drm_eld_size(connector->eld);
908                 memcpy(buf, connector->eld, min(max_bytes, ret));
909
910                 break;
911         }
912         drm_connector_list_iter_end(&conn_iter);
913
914         mutex_unlock(&adev->dm.audio_lock);
915
916         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
917
918         return ret;
919 }
920
921 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
922         .get_eld = amdgpu_dm_audio_component_get_eld,
923 };
924
925 static int amdgpu_dm_audio_component_bind(struct device *kdev,
926                                        struct device *hda_kdev, void *data)
927 {
928         struct drm_device *dev = dev_get_drvdata(kdev);
929         struct amdgpu_device *adev = drm_to_adev(dev);
930         struct drm_audio_component *acomp = data;
931
932         acomp->ops = &amdgpu_dm_audio_component_ops;
933         acomp->dev = kdev;
934         adev->dm.audio_component = acomp;
935
936         return 0;
937 }
938
939 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
940                                           struct device *hda_kdev, void *data)
941 {
942         struct drm_device *dev = dev_get_drvdata(kdev);
943         struct amdgpu_device *adev = drm_to_adev(dev);
944         struct drm_audio_component *acomp = data;
945
946         acomp->ops = NULL;
947         acomp->dev = NULL;
948         adev->dm.audio_component = NULL;
949 }
950
951 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
952         .bind   = amdgpu_dm_audio_component_bind,
953         .unbind = amdgpu_dm_audio_component_unbind,
954 };
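/*
 * Registered through the generic component framework; the HDA audio driver
 * binds as the other half and calls .get_eld above to fetch per-connector
 * ELDs.
 */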
955
956 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
957 {
958         int i, ret;
959
960         if (!amdgpu_audio)
961                 return 0;
962
963         adev->mode_info.audio.enabled = true;
964
965         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
966
967         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
968                 adev->mode_info.audio.pin[i].channels = -1;
969                 adev->mode_info.audio.pin[i].rate = -1;
970                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
971                 adev->mode_info.audio.pin[i].status_bits = 0;
972                 adev->mode_info.audio.pin[i].category_code = 0;
973                 adev->mode_info.audio.pin[i].connected = false;
974                 adev->mode_info.audio.pin[i].id =
975                         adev->dm.dc->res_pool->audios[i]->inst;
976                 adev->mode_info.audio.pin[i].offset = 0;
977         }
978
979         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
980         if (ret < 0)
981                 return ret;
982
983         adev->dm.audio_registered = true;
984
985         return 0;
986 }
987
988 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
989 {
990         if (!amdgpu_audio)
991                 return;
992
993         if (!adev->mode_info.audio.enabled)
994                 return;
995
996         if (adev->dm.audio_registered) {
997                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
998                 adev->dm.audio_registered = false;
999         }
1000
1001         /* TODO: Disable audio? */
1002
1003         adev->mode_info.audio.enabled = false;
1004 }
1005
1006 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1007 {
1008         struct drm_audio_component *acomp = adev->dm.audio_component;
1009
1010         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1011                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1012
1013                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1014                                                  pin, -1);
1015         }
1016 }
1017
1018 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1019 {
1020         const struct dmcub_firmware_header_v1_0 *hdr;
1021         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1022         struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1023         const struct firmware *dmub_fw = adev->dm.dmub_fw;
1024         struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1025         struct abm *abm = adev->dm.dc->res_pool->abm;
1026         struct dmub_srv_hw_params hw_params;
1027         enum dmub_status status;
1028         const unsigned char *fw_inst_const, *fw_bss_data;
1029         uint32_t i, fw_inst_const_size, fw_bss_data_size;
1030         bool has_hw_support;
1031
1032         if (!dmub_srv)
1033                 /* DMUB isn't supported on the ASIC. */
1034                 return 0;
1035
1036         if (!fb_info) {
1037                 DRM_ERROR("No framebuffer info for DMUB service.\n");
1038                 return -EINVAL;
1039         }
1040
1041         if (!dmub_fw) {
1042                 /* Firmware required for DMUB support. */
1043                 DRM_ERROR("No firmware provided for DMUB.\n");
1044                 return -EINVAL;
1045         }
1046
1047         status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1048         if (status != DMUB_STATUS_OK) {
1049                 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1050                 return -EINVAL;
1051         }
1052
1053         if (!has_hw_support) {
1054                 DRM_INFO("DMUB unsupported on ASIC\n");
1055                 return 0;
1056         }
1057
1058         /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1059         status = dmub_srv_hw_reset(dmub_srv);
1060         if (status != DMUB_STATUS_OK)
1061                 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1062
1063         hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1064
1065         fw_inst_const = dmub_fw->data +
1066                         le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1067                         PSP_HEADER_BYTES;
1068
1069         fw_bss_data = dmub_fw->data +
1070                       le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1071                       le32_to_cpu(hdr->inst_const_bytes);
1072
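        /*
         * Firmware blob layout as consumed here, relative to
         * ucode_array_offset_bytes: a PSP header (PSP_HEADER_BYTES), the
         * inst_const code region ending in a PSP footer (PSP_FOOTER_BYTES),
         * then bss_data_bytes of BSS/data.
         */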
1073         /* Copy firmware and bios info into FB memory. */
1074         fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1075                              PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1076
1077         fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1078
1079         /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1080          * amdgpu_ucode_init_single_fw will load dmub firmware
1081          * fw_inst_const part to cw0; otherwise, the firmware back door load
1082          * will be done by dm_dmub_hw_init
1083          */
1084         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1085                 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1086                                 fw_inst_const_size);
1087         }
1088
1089         if (fw_bss_data_size)
1090                 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1091                        fw_bss_data, fw_bss_data_size);
1092
1093         /* Copy firmware bios info into FB memory. */
1094         memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1095                adev->bios_size);
1096
1097         /* Reset regions that need to be reset. */
1098         memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1099                fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1100
1101         memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1102                fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1103
1104         memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1105                fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1106
1107         /* Initialize hardware. */
1108         memset(&hw_params, 0, sizeof(hw_params));
1109         hw_params.fb_base = adev->gmc.fb_start;
1110         hw_params.fb_offset = adev->gmc.aper_base;
1111
1112         /* backdoor load firmware and trigger dmub running */
1113         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1114                 hw_params.load_inst_const = true;
1115
1116         if (dmcu)
1117                 hw_params.psp_version = dmcu->psp_version;
1118
1119         for (i = 0; i < fb_info->num_fb; ++i)
1120                 hw_params.fb[i] = &fb_info->fb[i];
1121
1122         switch (adev->ip_versions[DCE_HWIP][0]) {
1123         case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
1124                 hw_params.dpia_supported = true;
1125                 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1126                 break;
1127         default:
1128                 break;
1129         }
1130
1131         status = dmub_srv_hw_init(dmub_srv, &hw_params);
1132         if (status != DMUB_STATUS_OK) {
1133                 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1134                 return -EINVAL;
1135         }
1136
1137         /* Wait for firmware load to finish. */
1138         status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1139         if (status != DMUB_STATUS_OK)
1140                 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1141
1142         /* Init DMCU and ABM if available. */
1143         if (dmcu && abm) {
1144                 dmcu->funcs->dmcu_init(dmcu);
1145                 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1146         }
1147
1148         if (!adev->dm.dc->ctx->dmub_srv)
1149                 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1150         if (!adev->dm.dc->ctx->dmub_srv) {
1151                 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1152                 return -ENOMEM;
1153         }
1154
1155         DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1156                  adev->dm.dmcub_fw_version);
1157
1158         return 0;
1159 }
1160
1161 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1162 {
1163         struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1164         enum dmub_status status;
1165         bool init;
1166
1167         if (!dmub_srv) {
1168                 /* DMUB isn't supported on the ASIC. */
1169                 return;
1170         }
1171
1172         status = dmub_srv_is_hw_init(dmub_srv, &init);
1173         if (status != DMUB_STATUS_OK)
1174                 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1175
1176         if (status == DMUB_STATUS_OK && init) {
1177                 /* Wait for firmware load to finish. */
1178                 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1179                 if (status != DMUB_STATUS_OK)
1180                         DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1181         } else {
1182                 /* Perform the full hardware initialization. */
1183                 dm_dmub_hw_init(adev);
1184         }
1185 }
1186
1187 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1188 {
1189         uint64_t pt_base;
1190         uint32_t logical_addr_low;
1191         uint32_t logical_addr_high;
1192         uint32_t agp_base, agp_bot, agp_top;
1193         PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1194
1195         memset(pa_config, 0, sizeof(*pa_config));
1196
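        /*
         * Register granularities used below: the system aperture is
         * programmed in 256KB units (>> 18), the AGP aperture in 16MB units
         * (>> 24), and the GART page table in 4KB pages (>> 12).
         */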
1197         logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1198         pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1199
1200         if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1201                 /*
1202                  * Raven2 has a HW issue that prevents it from using vram which
1203                  * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
1204                  * increase the system aperture high address (by 1) to get rid
1205                  * of the VM fault and hardware hang.
1206                  */
1207                 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1208         else
1209                 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1210
1211         agp_base = 0;
1212         agp_bot = adev->gmc.agp_start >> 24;
1213         agp_top = adev->gmc.agp_end >> 24;
1214
1215
1216         page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1217         page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1218         page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1219         page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1220         page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1221         page_table_base.low_part = lower_32_bits(pt_base);
1222
1223         pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1224         pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1225
1226         pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1227         pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1228         pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1229
1230         pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1231         pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1232         pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1233
1234         pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1235         pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1236         pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1237
1238         pa_config->is_hvm_enabled = 0;
1239
1240 }
1241
1242 static void vblank_control_worker(struct work_struct *work)
1243 {
1244         struct vblank_control_work *vblank_work =
1245                 container_of(work, struct vblank_control_work, work);
1246         struct amdgpu_display_manager *dm = vblank_work->dm;
1247
1248         mutex_lock(&dm->dc_lock);
1249
1250         if (vblank_work->enable)
1251                 dm->active_vblank_irq_count++;
1252         else if (dm->active_vblank_irq_count)
1253                 dm->active_vblank_irq_count--;
1254
1255         dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1256
1257         DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1258
1259         /* Control PSR based on vblank requirements from OS */
1260         if (vblank_work->stream && vblank_work->stream->link) {
1261                 if (vblank_work->enable) {
1262                         if (vblank_work->stream->link->psr_settings.psr_allow_active)
1263                                 amdgpu_dm_psr_disable(vblank_work->stream);
1264                 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1265                            !vblank_work->stream->link->psr_settings.psr_allow_active &&
1266                            vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1267                         amdgpu_dm_psr_enable(vblank_work->stream);
1268                 }
1269         }
1270
1271         mutex_unlock(&dm->dc_lock);
1272
1273         dc_stream_release(vblank_work->stream);
1274
1275         kfree(vblank_work);
1276 }
1277
1278 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1279 {
1280         struct hpd_rx_irq_offload_work *offload_work;
1281         struct amdgpu_dm_connector *aconnector;
1282         struct dc_link *dc_link;
1283         struct amdgpu_device *adev;
1284         enum dc_connection_type new_connection_type = dc_connection_none;
1285         unsigned long flags;
1286
1287         offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1288         aconnector = offload_work->offload_wq->aconnector;
1289
1290         if (!aconnector) {
1291                 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1292                 goto skip;
1293         }
1294
1295         adev = drm_to_adev(aconnector->base.dev);
1296         dc_link = aconnector->dc_link;
1297
1298         mutex_lock(&aconnector->hpd_lock);
1299         if (!dc_link_detect_sink(dc_link, &new_connection_type))
1300                 DRM_ERROR("KMS: Failed to detect connector\n");
1301         mutex_unlock(&aconnector->hpd_lock);
1302
1303         if (new_connection_type == dc_connection_none)
1304                 goto skip;
1305
1306         if (amdgpu_in_reset(adev))
1307                 goto skip;
1308
1309         mutex_lock(&adev->dm.dc_lock);
1310         if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1311                 dc_link_dp_handle_automated_test(dc_link);
1312         else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1313                         hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1314                         dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1315                 dc_link_dp_handle_link_loss(dc_link);
1316                 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1317                 offload_work->offload_wq->is_handling_link_loss = false;
1318                 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1319         }
1320         mutex_unlock(&adev->dm.dc_lock);
1321
1322 skip:
1323         kfree(offload_work);
1324
1325 }
1326
1327 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1328 {
1329         int max_caps = dc->caps.max_links;
1330         int i = 0;
1331         struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1332
1333         hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1334
1335         if (!hpd_rx_offload_wq)
1336                 return NULL;
1337
1338
1339         for (i = 0; i < max_caps; i++) {
1340                 hpd_rx_offload_wq[i].wq =
1341                                     create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1342
1343                 if (hpd_rx_offload_wq[i].wq == NULL) {
1344                         DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1345                         while (--i >= 0)
1346                                 destroy_workqueue(hpd_rx_offload_wq[i].wq);
1347                         kfree(hpd_rx_offload_wq);
1348                         return NULL;
1349                 }
1347
1348                 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1349         }
1350
1351         return hpd_rx_offload_wq;
1352 }
1353
1354 struct amdgpu_stutter_quirk {
1355         u16 chip_vendor;
1356         u16 chip_device;
1357         u16 subsys_vendor;
1358         u16 subsys_device;
1359         u8 revision;
1360 };
1361
1362 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1363         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1364         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1365         { 0, 0, 0, 0, 0 },
1366 };
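/*
 * An entry matches only when all five IDs (PCI vendor/device, subsystem
 * vendor/device and revision) match the board. To quirk another board,
 * add it above the zero terminator, e.g. with hypothetical IDs:
 *
 *      { 0x1002, 0x15d8, 0x103c, 0x8615, 0xc4 },
 */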
1367
1368 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1369 {
1370         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1371
1372         while (p && p->chip_device != 0) {
1373                 if (pdev->vendor == p->chip_vendor &&
1374                     pdev->device == p->chip_device &&
1375                     pdev->subsystem_vendor == p->subsys_vendor &&
1376                     pdev->subsystem_device == p->subsys_device &&
1377                     pdev->revision == p->revision) {
1378                         return true;
1379                 }
1380                 ++p;
1381         }
1382         return false;
1383 }
1384
1385 static int amdgpu_dm_init(struct amdgpu_device *adev)
1386 {
1387         struct dc_init_data init_data;
1388 #ifdef CONFIG_DRM_AMD_DC_HDCP
1389         struct dc_callback_init init_params;
1390 #endif
1391         int r;
1392
1393         adev->dm.ddev = adev_to_drm(adev);
1394         adev->dm.adev = adev;
1395
1396         /* Zero all the fields */
1397         memset(&init_data, 0, sizeof(init_data));
1398 #ifdef CONFIG_DRM_AMD_DC_HDCP
1399         memset(&init_params, 0, sizeof(init_params));
1400 #endif
1401
1402         mutex_init(&adev->dm.dc_lock);
1403         mutex_init(&adev->dm.audio_lock);
1404         spin_lock_init(&adev->dm.vblank_lock);
1405
        if (amdgpu_dm_irq_init(adev)) {
1407                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1408                 goto error;
1409         }
1410
1411         init_data.asic_id.chip_family = adev->family;
1412
1413         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1414         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1415         init_data.asic_id.chip_id = adev->pdev->device;
1416
1417         init_data.asic_id.vram_width = adev->gmc.vram_width;
1418         /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1419         init_data.asic_id.atombios_base_address =
1420                 adev->mode_info.atom_context->bios;
1421
1422         init_data.driver = adev;
1423
1424         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1425
1426         if (!adev->dm.cgs_device) {
1427                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1428                 goto error;
1429         }
1430
1431         init_data.cgs_device = adev->dm.cgs_device;
1432
1433         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1434
1435         switch (adev->ip_versions[DCE_HWIP][0]) {
1436         case IP_VERSION(2, 1, 0):
1437                 switch (adev->dm.dmcub_fw_version) {
1438                 case 0: /* development */
1439                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1440                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1441                         init_data.flags.disable_dmcu = false;
1442                         break;
1443                 default:
1444                         init_data.flags.disable_dmcu = true;
1445                 }
1446                 break;
1447         case IP_VERSION(2, 0, 3):
1448                 init_data.flags.disable_dmcu = true;
1449                 break;
1450         default:
1451                 break;
1452         }
1453
1454         switch (adev->asic_type) {
1455         case CHIP_CARRIZO:
1456         case CHIP_STONEY:
1457                 init_data.flags.gpu_vm_support = true;
1458                 break;
1459         default:
1460                 switch (adev->ip_versions[DCE_HWIP][0]) {
1461                 case IP_VERSION(1, 0, 0):
1462                 case IP_VERSION(1, 0, 1):
1463                         /* enable S/G on PCO and RV2 */
1464                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1465                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1466                                 init_data.flags.gpu_vm_support = true;
1467                         break;
1468                 case IP_VERSION(2, 1, 0):
1469                 case IP_VERSION(3, 0, 1):
1470                 case IP_VERSION(3, 1, 2):
1471                 case IP_VERSION(3, 1, 3):
1472                 case IP_VERSION(3, 1, 5):
1473                 case IP_VERSION(3, 1, 6):
1474                         init_data.flags.gpu_vm_support = true;
1475                         break;
1476                 default:
1477                         break;
1478                 }
1479                 break;
1480         }
1481
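        /*
         * gpu_vm_support selects scatter/gather display, i.e. letting the
         * display hardware scan framebuffers out of system memory rather
         * than carved-out VRAM on the APUs flagged above.
         */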
1482         if (init_data.flags.gpu_vm_support)
1483                 adev->mode_info.gpu_vm_support = true;
1484
1485         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1486                 init_data.flags.fbc_support = true;
1487
1488         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1489                 init_data.flags.multi_mon_pp_mclk_switch = true;
1490
1491         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1492                 init_data.flags.disable_fractional_pwm = true;
1493
1494         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1495                 init_data.flags.edp_no_power_sequencing = true;
1496
1497         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1498                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1499         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1500                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1501
1502         init_data.flags.seamless_boot_edp_requested = false;
1503
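        /*
         * Seamless boot lets the driver adopt the eDP stream that VBIOS/GOP
         * already lit up, avoiding a visible blank during the firmware to
         * driver handoff.
         */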
1504         if (check_seamless_boot_capability(adev)) {
1505                 init_data.flags.seamless_boot_edp_requested = true;
1506                 init_data.flags.allow_seamless_boot_optimization = true;
1507                 DRM_INFO("Seamless boot condition check passed\n");
1508         }
1509
1510         INIT_LIST_HEAD(&adev->dm.da_list);
1511         /* Display Core create. */
1512         adev->dm.dc = dc_create(&init_data);
1513
        if (adev->dm.dc) {
                DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
        } else {
                DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }
1520
1521         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1522                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1523                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1524         }
1525
        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1528         if (dm_should_disable_stutter(adev->pdev))
1529                 adev->dm.dc->debug.disable_stutter = true;
1530
1531         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1532                 adev->dm.dc->debug.disable_stutter = true;
1533
1534         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1535                 adev->dm.dc->debug.disable_dsc = true;
1536                 adev->dm.dc->debug.disable_dsc_edp = true;
1537         }
1538
1539         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1540                 adev->dm.dc->debug.disable_clock_gate = true;
1541
1542         r = dm_dmub_hw_init(adev);
1543         if (r) {
1544                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1545                 goto error;
1546         }
1547
1548         dc_hardware_init(adev->dm.dc);
1549
1550         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1551         if (!adev->dm.hpd_rx_offload_wq) {
1552                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1553                 goto error;
1554         }
1555
1556         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1557                 struct dc_phy_addr_space_config pa_config;
1558
1559                 mmhub_read_system_context(adev, &pa_config);
1560
1561                 // Call the DC init_memory func
1562                 dc_setup_system_context(adev->dm.dc, &pa_config);
1563         }
1564
        adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
        if (!adev->dm.freesync_module) {
                DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
        } else {
                DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
                                adev->dm.freesync_module);
        }
1572
1573         amdgpu_dm_init_color_mod();
1574
1575         if (adev->dm.dc->caps.max_links > 0) {
1576                 adev->dm.vblank_control_workqueue =
1577                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1578                 if (!adev->dm.vblank_control_workqueue)
1579                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1580         }
1581
1582 #ifdef CONFIG_DRM_AMD_DC_HDCP
1583         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1584                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1585
1586                 if (!adev->dm.hdcp_workqueue)
1587                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1588                 else
1589                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1590
1591                 dc_init_callbacks(adev->dm.dc, &init_params);
1592         }
1593 #endif
1594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1595         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1596 #endif
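        /*
         * DMUB can deliver asynchronous notifications (AUX replies, HPD and
         * HPD IRQ events) through its outbox; set up the outbox and register
         * the callbacks that service those notifications.
         */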
1597         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1598                 init_completion(&adev->dm.dmub_aux_transfer_done);
1599                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
                        DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
                        goto error;
                }
1604
1605                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1606                 if (!adev->dm.delayed_hpd_wq) {
1607                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1608                         goto error;
1609                 }
1610
1611                 amdgpu_dm_outbox_init(adev);
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
                        dmub_aux_setconfig_callback, false)) {
                        DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
                        goto error;
                }
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
                        DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
                        goto error;
                }
                if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
                        DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
                        goto error;
                }
1625         }
1626
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
                goto error;
        }
1632
1633         /* create fake encoders for MST */
1634         dm_dp_create_fake_mst_encoders(adev);
1635
1636         /* TODO: Add_display_info? */
1637
1638         /* TODO use dynamic cursor width */
1639         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1640         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1641
        if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
                DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
                goto error;
        }

1649         DRM_DEBUG_DRIVER("KMS initialized.\n");
1650
1651         return 0;
1652 error:
1653         amdgpu_dm_fini(adev);
1654
1655         return -EINVAL;
1656 }
1657
1658 static int amdgpu_dm_early_fini(void *handle)
1659 {
1660         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1661
1662         amdgpu_dm_audio_fini(adev);
1663
1664         return 0;
1665 }
1666
1667 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1668 {
1669         int i;
1670
1671         if (adev->dm.vblank_control_workqueue) {
1672                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1673                 adev->dm.vblank_control_workqueue = NULL;
1674         }
1675
        for (i = 0; i < adev->dm.display_indexes_num; i++)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1679
1680         amdgpu_dm_destroy_drm_device(&adev->dm);
1681
1682 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1683         if (adev->dm.crc_rd_wrk) {
1684                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1685                 kfree(adev->dm.crc_rd_wrk);
1686                 adev->dm.crc_rd_wrk = NULL;
1687         }
1688 #endif
1689 #ifdef CONFIG_DRM_AMD_DC_HDCP
1690         if (adev->dm.hdcp_workqueue) {
1691                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1692                 adev->dm.hdcp_workqueue = NULL;
1693         }
1694
1695         if (adev->dm.dc)
1696                 dc_deinit_callbacks(adev->dm.dc);
1697 #endif
1698
        /*
         * amdgpu_dm_fini() is also the error path of amdgpu_dm_init(), so
         * adev->dm.dc may still be NULL here; guard the dereferences.
         */
        if (adev->dm.dc)
                dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);

        if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
                adev->dm.dmub_notify = NULL;
                if (adev->dm.delayed_hpd_wq) {
                        destroy_workqueue(adev->dm.delayed_hpd_wq);
                        adev->dm.delayed_hpd_wq = NULL;
                }
        }
1707
1708         if (adev->dm.dmub_bo)
1709                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1710                                       &adev->dm.dmub_bo_gpu_addr,
1711                                       &adev->dm.dmub_bo_cpu_addr);
1712
1713         if (adev->dm.hpd_rx_offload_wq) {
1714                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1715                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1716                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1717                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1718                         }
1719                 }
1720
1721                 kfree(adev->dm.hpd_rx_offload_wq);
1722                 adev->dm.hpd_rx_offload_wq = NULL;
1723         }
1724
1725         /* DC Destroy TODO: Replace destroy DAL */
1726         if (adev->dm.dc)
1727                 dc_destroy(&adev->dm.dc);
1728         /*
1729          * TODO: pageflip, vlank interrupt
1730          *
1731          * amdgpu_dm_irq_fini(adev);
1732          */
1733
1734         if (adev->dm.cgs_device) {
1735                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1736                 adev->dm.cgs_device = NULL;
1737         }
1738         if (adev->dm.freesync_module) {
1739                 mod_freesync_destroy(adev->dm.freesync_module);
1740                 adev->dm.freesync_module = NULL;
1741         }
1742
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
}
1748
1749 static int load_dmcu_fw(struct amdgpu_device *adev)
1750 {
1751         const char *fw_name_dmcu = NULL;
1752         int r;
1753         const struct dmcu_firmware_header_v1_0 *hdr;
1754
        switch (adev->asic_type) {
1756 #if defined(CONFIG_DRM_AMD_DC_SI)
1757         case CHIP_TAHITI:
1758         case CHIP_PITCAIRN:
1759         case CHIP_VERDE:
1760         case CHIP_OLAND:
1761 #endif
1762         case CHIP_BONAIRE:
1763         case CHIP_HAWAII:
1764         case CHIP_KAVERI:
1765         case CHIP_KABINI:
1766         case CHIP_MULLINS:
1767         case CHIP_TONGA:
1768         case CHIP_FIJI:
1769         case CHIP_CARRIZO:
1770         case CHIP_STONEY:
1771         case CHIP_POLARIS11:
1772         case CHIP_POLARIS10:
1773         case CHIP_POLARIS12:
1774         case CHIP_VEGAM:
1775         case CHIP_VEGA10:
1776         case CHIP_VEGA12:
1777         case CHIP_VEGA20:
1778                 return 0;
1779         case CHIP_NAVI12:
1780                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1781                 break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
                else
                        return 0;
                break;
1790         default:
1791                 switch (adev->ip_versions[DCE_HWIP][0]) {
1792                 case IP_VERSION(2, 0, 2):
1793                 case IP_VERSION(2, 0, 3):
1794                 case IP_VERSION(2, 0, 0):
1795                 case IP_VERSION(2, 1, 0):
1796                 case IP_VERSION(3, 0, 0):
1797                 case IP_VERSION(3, 0, 2):
1798                 case IP_VERSION(3, 0, 3):
1799                 case IP_VERSION(3, 0, 1):
1800                 case IP_VERSION(3, 1, 2):
1801                 case IP_VERSION(3, 1, 3):
1802                 case IP_VERSION(3, 1, 5):
1803                 case IP_VERSION(3, 1, 6):
1804                         return 0;
1805                 default:
1806                         break;
1807                 }
1808                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1809                 return -EINVAL;
1810         }
1811
1812         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1813                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1814                 return 0;
1815         }
1816
1817         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1818         if (r == -ENOENT) {
1819                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1820                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1821                 adev->dm.fw_dmcu = NULL;
1822                 return 0;
1823         }
1824         if (r) {
1825                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1826                         fw_name_dmcu);
1827                 return r;
1828         }
1829
1830         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1831         if (r) {
1832                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1833                         fw_name_dmcu);
1834                 release_firmware(adev->dm.fw_dmcu);
1835                 adev->dm.fw_dmcu = NULL;
1836                 return r;
1837         }
1838
1839         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1840         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1841         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1842         adev->firmware.fw_size +=
1843                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1844
1845         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1846         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1847         adev->firmware.fw_size +=
1848                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1849
1850         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851
1852         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1853
1854         return 0;
1855 }
1856
1857 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1858 {
1859         struct amdgpu_device *adev = ctx;
1860
1861         return dm_read_reg(adev->dm.dc->ctx, address);
1862 }
1863
1864 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1865                                      uint32_t value)
1866 {
1867         struct amdgpu_device *adev = ctx;
1868
1869         return dm_write_reg(adev->dm.dc->ctx, address, value);
1870 }
1871
1872 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1873 {
1874         struct dmub_srv_create_params create_params;
1875         struct dmub_srv_region_params region_params;
1876         struct dmub_srv_region_info region_info;
1877         struct dmub_srv_fb_params fb_params;
1878         struct dmub_srv_fb_info *fb_info;
1879         struct dmub_srv *dmub_srv;
1880         const struct dmcub_firmware_header_v1_0 *hdr;
1881         const char *fw_name_dmub;
1882         enum dmub_asic dmub_asic;
1883         enum dmub_status status;
1884         int r;
1885
1886         switch (adev->ip_versions[DCE_HWIP][0]) {
1887         case IP_VERSION(2, 1, 0):
1888                 dmub_asic = DMUB_ASIC_DCN21;
1889                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1890                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1891                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1892                 break;
1893         case IP_VERSION(3, 0, 0):
1894                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1895                         dmub_asic = DMUB_ASIC_DCN30;
1896                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1897                 } else {
1898                         dmub_asic = DMUB_ASIC_DCN30;
1899                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1900                 }
1901                 break;
1902         case IP_VERSION(3, 0, 1):
1903                 dmub_asic = DMUB_ASIC_DCN301;
1904                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1905                 break;
1906         case IP_VERSION(3, 0, 2):
1907                 dmub_asic = DMUB_ASIC_DCN302;
1908                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1909                 break;
1910         case IP_VERSION(3, 0, 3):
1911                 dmub_asic = DMUB_ASIC_DCN303;
1912                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1913                 break;
1914         case IP_VERSION(3, 1, 2):
1915         case IP_VERSION(3, 1, 3):
1916                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1917                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1918                 break;
1919         case IP_VERSION(3, 1, 5):
1920                 dmub_asic = DMUB_ASIC_DCN315;
1921                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1922                 break;
1923         case IP_VERSION(3, 1, 6):
1924                 dmub_asic = DMUB_ASIC_DCN316;
1925                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1926                 break;
1927         default:
1928                 /* ASIC doesn't support DMUB. */
1929                 return 0;
1930         }
1931
        r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
        if (r) {
                DRM_ERROR("DMUB firmware loading failed: %d\n", r);
                return r;
        }

        r = amdgpu_ucode_validate(adev->dm.dmub_fw);
        if (r) {
                DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
                return r;
        }
1943
1944         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1945         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1946
1947         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1948                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1949                         AMDGPU_UCODE_ID_DMCUB;
1950                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1951                         adev->dm.dmub_fw;
1952                 adev->firmware.fw_size +=
1953                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1954
1955                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1956                          adev->dm.dmcub_fw_version);
        }

1960         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1961         dmub_srv = adev->dm.dmub_srv;
1962
1963         if (!dmub_srv) {
1964                 DRM_ERROR("Failed to allocate DMUB service!\n");
1965                 return -ENOMEM;
1966         }
1967
1968         memset(&create_params, 0, sizeof(create_params));
1969         create_params.user_ctx = adev;
1970         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1971         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1972         create_params.asic = dmub_asic;
1973
1974         /* Create the DMUB service. */
1975         status = dmub_srv_create(dmub_srv, &create_params);
1976         if (status != DMUB_STATUS_OK) {
1977                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1978                 return -EINVAL;
1979         }
1980
1981         /* Calculate the size of all the regions for the DMUB service. */
1982         memset(&region_params, 0, sizeof(region_params));
1983
1984         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1985                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1986         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1987         region_params.vbios_size = adev->bios_size;
1988         region_params.fw_bss_data = region_params.bss_data_size ?
1989                 adev->dm.dmub_fw->data +
1990                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1991                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1992         region_params.fw_inst_const =
1993                 adev->dm.dmub_fw->data +
1994                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1995                 PSP_HEADER_BYTES;
1996
1997         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1998                                            &region_info);
1999
2000         if (status != DMUB_STATUS_OK) {
2001                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2002                 return -EINVAL;
2003         }
2004
2005         /*
2006          * Allocate a framebuffer based on the total size of all the regions.
2007          * TODO: Move this into GART.
2008          */
2009         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2010                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2011                                     &adev->dm.dmub_bo_gpu_addr,
2012                                     &adev->dm.dmub_bo_cpu_addr);
2013         if (r)
2014                 return r;
2015
2016         /* Rebase the regions on the framebuffer address. */
2017         memset(&fb_params, 0, sizeof(fb_params));
2018         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2019         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2020         fb_params.region_info = &region_info;
2021
2022         adev->dm.dmub_fb_info =
2023                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2024         fb_info = adev->dm.dmub_fb_info;
2025
        if (!fb_info) {
                DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
                return -ENOMEM;
        }
2031
2032         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2033         if (status != DMUB_STATUS_OK) {
2034                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2035                 return -EINVAL;
2036         }
2037
2038         return 0;
2039 }
2040
2041 static int dm_sw_init(void *handle)
2042 {
2043         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2044         int r;
2045
2046         r = dm_dmub_sw_init(adev);
2047         if (r)
2048                 return r;
2049
2050         return load_dmcu_fw(adev);
2051 }
2052
2053 static int dm_sw_fini(void *handle)
2054 {
2055         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2056
2057         kfree(adev->dm.dmub_fb_info);
2058         adev->dm.dmub_fb_info = NULL;
2059
2060         if (adev->dm.dmub_srv) {
2061                 dmub_srv_destroy(adev->dm.dmub_srv);
2062                 adev->dm.dmub_srv = NULL;
2063         }
2064
2065         release_firmware(adev->dm.dmub_fw);
2066         adev->dm.dmub_fw = NULL;
2067
2068         release_firmware(adev->dm.fw_dmcu);
2069         adev->dm.fw_dmcu = NULL;
2070
2071         return 0;
2072 }
2073
2074 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2075 {
2076         struct amdgpu_dm_connector *aconnector;
2077         struct drm_connector *connector;
2078         struct drm_connector_list_iter iter;
2079         int ret = 0;
2080
2081         drm_connector_list_iter_begin(dev, &iter);
2082         drm_for_each_connector_iter(connector, &iter) {
2083                 aconnector = to_amdgpu_dm_connector(connector);
2084                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2085                     aconnector->mst_mgr.aux) {
2086                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2087                                          aconnector,
2088                                          aconnector->base.base.id);
2089
2090                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2091                         if (ret < 0) {
2092                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2093                                 aconnector->dc_link->type =
2094                                         dc_connection_single;
2095                                 break;
2096                         }
2097                 }
2098         }
2099         drm_connector_list_iter_end(&iter);
2100
2101         return ret;
2102 }
2103
2104 static int dm_late_init(void *handle)
2105 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct dmcu_iram_parameters params;
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
2112
2113         dmcu = adev->dm.dc->res_pool->dmcu;
2114
2115         for (i = 0; i < 16; i++)
2116                 linear_lut[i] = 0xFFFF * i / 15;
2117
2118         params.set = 0;
2119         params.backlight_ramping_override = false;
2120         params.backlight_ramping_start = 0xCCCC;
2121         params.backlight_ramping_reduction = 0xCCCCCCCC;
2122         params.backlight_lut_array_size = 16;
2123         params.backlight_lut_array = linear_lut;
2124
        /* Min backlight level after ABM reduction; don't allow below 1%.
         * 0xFFFF * 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;
        /* In the case where ABM is implemented on DMCUB, the DMCU object
         * will be NULL. ABM 2.4 and up are implemented on DMCUB.
         */
2133         if (dmcu) {
2134                 if (!dmcu_load_iram(dmcu, params))
2135                         return -EINVAL;
2136         } else if (adev->dm.dc->ctx->dmub_srv) {
2137                 struct dc_link *edp_links[MAX_NUM_EDP];
2138                 int edp_num;
2139
2140                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2141                 for (i = 0; i < edp_num; i++) {
2142                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2143                                 return -EINVAL;
2144                 }
2145         }
2146
2147         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2148 }
2149
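/*
 * Suspend or resume the MST topology manager of every root MST connector.
 * If a manager fails to resume, its topology is torn down and a hotplug
 * event is sent so userspace can re-probe the connector.
 */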
2150 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2151 {
2152         struct amdgpu_dm_connector *aconnector;
2153         struct drm_connector *connector;
2154         struct drm_connector_list_iter iter;
2155         struct drm_dp_mst_topology_mgr *mgr;
2156         int ret;
2157         bool need_hotplug = false;
2158
2159         drm_connector_list_iter_begin(dev, &iter);
2160         drm_for_each_connector_iter(connector, &iter) {
2161                 aconnector = to_amdgpu_dm_connector(connector);
2162                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2163                     aconnector->mst_port)
2164                         continue;
2165
2166                 mgr = &aconnector->mst_mgr;
2167
2168                 if (suspend) {
2169                         drm_dp_mst_topology_mgr_suspend(mgr);
2170                 } else {
2171                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2172                         if (ret < 0) {
2173                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2174                                 need_hotplug = true;
2175                         }
2176                 }
2177         }
2178         drm_connector_list_iter_end(&iter);
2179
2180         if (need_hotplug)
2181                 drm_kms_helper_hotplug_event(dev);
2182 }
2183
2184 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2185 {
2186         int ret = 0;
2187
        /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
         * depends on the Windows driver dc implementation.
         * For Navi1x, clock settings of dcn watermarks are fixed. The
         * settings should be passed to smu during boot up and resume from
         * s3.
         * boot up: dc calculates dcn watermark clock settings within
         * dc_create, dcn20_resource_construct, then calls the pplib
         * functions below to pass the settings to smu:
         * smu_set_watermarks_for_clock_ranges
         * smu_set_watermarks_table
         * navi10_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Renoir, clock settings of dcn watermarks are also fixed
         * values. dc has implemented a different flow for the Windows
         * driver:
         * dc_hardware_init / dc_set_power_state
         * dcn10_init_hw
         * notify_wm_ranges
         * set_wm_ranges
         * -- Linux
         * smu_set_watermarks_for_clock_ranges
         * renoir_set_watermarks_table
         * smu_write_watermarks_table
         *
         * For Linux,
         * dc_hardware_init -> amdgpu_dm_init
         * dc_set_power_state --> dm_resume
         *
         * therefore, this function applies to navi10/12/14 but not Renoir
         */
2218         switch (adev->ip_versions[DCE_HWIP][0]) {
2219         case IP_VERSION(2, 0, 2):
2220         case IP_VERSION(2, 0, 0):
2221                 break;
2222         default:
2223                 return 0;
2224         }
2225
2226         ret = amdgpu_dpm_write_watermarks_table(adev);
2227         if (ret) {
2228                 DRM_ERROR("Failed to update WMTABLE!\n");
2229                 return ret;
2230         }
2231
2232         return 0;
2233 }
2234
2235 /**
2236  * dm_hw_init() - Initialize DC device
2237  * @handle: The base driver device containing the amdgpu_dm device.
2238  *
2239  * Initialize the &struct amdgpu_display_manager device. This involves calling
2240  * the initializers of each DM component, then populating the struct with them.
2241  *
2242  * Although the function implies hardware initialization, both hardware and
2243  * software are initialized here. Splitting them out to their relevant init
2244  * hooks is a future TODO item.
2245  *
2246  * Some notable things that are initialized here:
2247  *
2248  * - Display Core, both software and hardware
2249  * - DC modules that we need (freesync and color management)
2250  * - DRM software states
2251  * - Interrupt sources and handlers
2252  * - Vblank support
2253  * - Debug FS entries, if enabled
2254  */
static int dm_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* Create DAL display manager */
        r = amdgpu_dm_init(adev);
        if (r)
                return r;
        amdgpu_dm_hpd_init(adev);

        return 0;
}
2264
2265 /**
2266  * dm_hw_fini() - Teardown DC device
2267  * @handle: The base driver device containing the amdgpu_dm device.
2268  *
2269  * Teardown components within &struct amdgpu_display_manager that require
2270  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271  * were loaded. Also flush IRQ workqueues and disable them.
2272  */
2273 static int dm_hw_fini(void *handle)
2274 {
2275         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276
2277         amdgpu_dm_hpd_fini(adev);
2278
2279         amdgpu_dm_irq_fini(adev);
2280         amdgpu_dm_fini(adev);
2281         return 0;
2282 }
2283
2284
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2287
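/*
 * Toggle the pflip and vblank interrupts of every stream in @state that has
 * at least one active plane. Used around GPU reset so stale interrupts do
 * not fire while the hardware is being reinitialized.
 */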
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289                                  struct dc_state *state, bool enable)
2290 {
2291         enum dc_irq_source irq_source;
2292         struct amdgpu_crtc *acrtc;
2293         int rc = -EBUSY;
2294         int i = 0;
2295
2296         for (i = 0; i < state->stream_count; i++) {
2297                 acrtc = get_crtc_by_otg_inst(
2298                                 adev, state->stream_status[i].primary_otg_inst);
2299
2300                 if (acrtc && state->stream_status[i].plane_count != 0) {
2301                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
                        DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
                                      acrtc->crtc_id, enable ? "en" : "dis", rc);
2305                         if (rc)
2306                                 DRM_WARN("Failed to %s pflip interrupts\n",
2307                                          enable ? "enable" : "disable");
2308
2309                         if (enable) {
2310                                 rc = dm_enable_vblank(&acrtc->base);
2311                                 if (rc)
2312                                         DRM_WARN("Failed to enable vblank interrupts\n");
2313                         } else {
2314                                 dm_disable_vblank(&acrtc->base);
2315                         }
                }
        }
}
2321
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2323 {
2324         struct dc_state *context = NULL;
2325         enum dc_status res = DC_ERROR_UNEXPECTED;
2326         int i;
2327         struct dc_stream_state *del_streams[MAX_PIPES];
2328         int del_streams_count = 0;
2329
2330         memset(del_streams, 0, sizeof(del_streams));
2331
2332         context = dc_create_state(dc);
2333         if (context == NULL)
2334                 goto context_alloc_fail;
2335
2336         dc_resource_state_copy_construct_current(dc, context);
2337
2338         /* First remove from context all streams */
2339         for (i = 0; i < context->stream_count; i++) {
2340                 struct dc_stream_state *stream = context->streams[i];
2341
2342                 del_streams[del_streams_count++] = stream;
2343         }
2344
2345         /* Remove all planes for removed streams and then remove the streams */
2346         for (i = 0; i < del_streams_count; i++) {
2347                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348                         res = DC_FAIL_DETACH_SURFACES;
2349                         goto fail;
2350                 }
2351
2352                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2353                 if (res != DC_OK)
2354                         goto fail;
2355         }
2356
2357         res = dc_commit_state(dc, context);
2358
2359 fail:
2360         dc_release_state(context);
2361
2362 context_alloc_fail:
2363         return res;
2364 }
2365
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2367 {
2368         int i;
2369
2370         if (dm->hpd_rx_offload_wq) {
2371                 for (i = 0; i < dm->dc->caps.max_links; i++)
2372                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2373         }
2374 }
2375
2376 static int dm_suspend(void *handle)
2377 {
2378         struct amdgpu_device *adev = handle;
2379         struct amdgpu_display_manager *dm = &adev->dm;
2380         int ret = 0;
2381
2382         if (amdgpu_in_reset(adev)) {
2383                 mutex_lock(&dm->dc_lock);
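                /*
                 * dc_lock is intentionally left held here: the matching
                 * mutex_unlock() is in the amdgpu_in_reset() branch of
                 * dm_resume(), after the cached state has been reapplied.
                 */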
2384
2385                 dc_allow_idle_optimizations(adev->dm.dc, false);
2386
2387                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2388
2389                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2390
2391                 amdgpu_dm_commit_zero_streams(dm->dc);
2392
2393                 amdgpu_dm_irq_suspend(adev);
2394
2395                 hpd_rx_irq_work_suspend(dm);
2396
2397                 return ret;
2398         }
2399
2400         WARN_ON(adev->dm.cached_state);
2401         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2402
2403         s3_handle_mst(adev_to_drm(adev), true);
2404
2405         amdgpu_dm_irq_suspend(adev);
2406
2407         hpd_rx_irq_work_suspend(dm);
2408
2409         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2410
2411         return 0;
2412 }
2413
2414 struct amdgpu_dm_connector *
2415 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2416                                              struct drm_crtc *crtc)
2417 {
2418         uint32_t i;
2419         struct drm_connector_state *new_con_state;
2420         struct drm_connector *connector;
2421         struct drm_crtc *crtc_from_state;
2422
2423         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2424                 crtc_from_state = new_con_state->crtc;
2425
2426                 if (crtc_from_state == crtc)
2427                         return to_amdgpu_dm_connector(connector);
2428         }
2429
2430         return NULL;
2431 }
2432
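/*
 * For connectors forced on (e.g. via sysfs) with no physically detected
 * sink, fabricate a sink that matches the connector signal so the rest of
 * the modeset path can behave as if a display were attached.
 */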
2433 static void emulated_link_detect(struct dc_link *link)
2434 {
2435         struct dc_sink_init_data sink_init_data = { 0 };
2436         struct display_sink_capability sink_caps = { 0 };
2437         enum dc_edid_status edid_status;
2438         struct dc_context *dc_ctx = link->ctx;
2439         struct dc_sink *sink = NULL;
2440         struct dc_sink *prev_sink = NULL;
2441
2442         link->type = dc_connection_none;
2443         prev_sink = link->local_sink;
2444
2445         if (prev_sink)
2446                 dc_sink_release(prev_sink);
2447
2448         switch (link->connector_signal) {
2449         case SIGNAL_TYPE_HDMI_TYPE_A: {
2450                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2451                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2452                 break;
2453         }
2454
2455         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2456                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2457                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2458                 break;
2459         }
2460
2461         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2462                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2463                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2464                 break;
2465         }
2466
2467         case SIGNAL_TYPE_LVDS: {
2468                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2469                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2470                 break;
2471         }
2472
2473         case SIGNAL_TYPE_EDP: {
2474                 sink_caps.transaction_type =
2475                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2476                 sink_caps.signal = SIGNAL_TYPE_EDP;
2477                 break;
2478         }
2479
2480         case SIGNAL_TYPE_DISPLAY_PORT: {
2481                 sink_caps.transaction_type =
2482                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2483                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2484                 break;
2485         }
2486
2487         default:
2488                 DC_ERROR("Invalid connector type! signal:%d\n",
2489                         link->connector_signal);
2490                 return;
2491         }
2492
2493         sink_init_data.link = link;
2494         sink_init_data.sink_signal = sink_caps.signal;
2495
2496         sink = dc_sink_create(&sink_init_data);
2497         if (!sink) {
2498                 DC_ERROR("Failed to create sink!\n");
2499                 return;
2500         }
2501
2502         /* dc_sink_create returns a new reference */
2503         link->local_sink = sink;
2504
2505         edid_status = dm_helpers_read_local_edid(
2506                         link->ctx,
2507                         link,
2508                         sink);
2509
        if (edid_status != EDID_OK)
                DC_ERROR("Failed to read EDID\n");
}
2514
2515 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2516                                      struct amdgpu_display_manager *dm)
2517 {
2518         struct {
2519                 struct dc_surface_update surface_updates[MAX_SURFACES];
2520                 struct dc_plane_info plane_infos[MAX_SURFACES];
2521                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2522                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2523                 struct dc_stream_update stream_update;
        } *bundle;
2525         int k, m;
2526
2527         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2528
2529         if (!bundle) {
2530                 dm_error("Failed to allocate update bundle\n");
2531                 goto cleanup;
2532         }
2533
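        /*
         * Re-commit every cached stream with all of its planes flagged for a
         * full update so the hardware is completely reprogrammed after reset.
         */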
        for (k = 0; k < dc_state->stream_count; k++) {
                bundle->stream_update.stream = dc_state->streams[k];

                for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
                        bundle->surface_updates[m].surface =
                                dc_state->stream_status[k].plane_states[m];
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status[k].plane_count,
                        dc_state->streams[k], &bundle->stream_update, dc_state);
        }
2548
cleanup:
        kfree(bundle);
}
2554
2555 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2556 {
2557         struct dc_stream_state *stream_state;
2558         struct amdgpu_dm_connector *aconnector = link->priv;
2559         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2560         struct dc_stream_update stream_update;
2561         bool dpms_off = true;
2562
2563         memset(&stream_update, 0, sizeof(stream_update));
2564         stream_update.dpms_off = &dpms_off;
2565
2566         mutex_lock(&adev->dm.dc_lock);
2567         stream_state = dc_stream_find_from_link(link);
2568
2569         if (stream_state == NULL) {
2570                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2571                 mutex_unlock(&adev->dm.dc_lock);
2572                 return;
2573         }
2574
2575         stream_update.stream = stream_state;
2576         acrtc_state->force_dpms_off = true;
2577         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2578                                      stream_state, &stream_update,
2579                                      stream_state->ctx->dc->current_state);
2580         mutex_unlock(&adev->dm.dc_lock);
2581 }
2582
2583 static int dm_resume(void *handle)
2584 {
2585         struct amdgpu_device *adev = handle;
2586         struct drm_device *ddev = adev_to_drm(adev);
2587         struct amdgpu_display_manager *dm = &adev->dm;
2588         struct amdgpu_dm_connector *aconnector;
2589         struct drm_connector *connector;
2590         struct drm_connector_list_iter iter;
2591         struct drm_crtc *crtc;
2592         struct drm_crtc_state *new_crtc_state;
2593         struct dm_crtc_state *dm_new_crtc_state;
2594         struct drm_plane *plane;
2595         struct drm_plane_state *new_plane_state;
2596         struct dm_plane_state *dm_new_plane_state;
2597         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2598         enum dc_connection_type new_connection_type = dc_connection_none;
2599         struct dc_state *dc_state;
2600         int i, r, j;
2601
2602         if (amdgpu_in_reset(adev)) {
2603                 dc_state = dm->cached_dc_state;
2604
2605                 /*
2606                  * The dc->current_state is backed up into dm->cached_dc_state
2607                  * before we commit 0 streams.
2608                  *
2609                  * DC will clear link encoder assignments on the real state
2610                  * but the changes won't propagate over to the copy we made
2611                  * before the 0 streams commit.
2612                  *
2613                  * DC expects that link encoder assignments are *not* valid
2614                  * when committing a state, so as a workaround we can copy
2615                  * off of the current state.
2616                  *
                 * We lose the previous assignments, but we had already
                 * committed 0 streams anyway.
2619                  */
2620                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2621
2622                 if (dc_enable_dmub_notifications(adev->dm.dc))
2623                         amdgpu_dm_outbox_init(adev);
2624
2625                 r = dm_dmub_hw_init(adev);
2626                 if (r)
2627                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2628
2629                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2630                 dc_resume(dm->dc);
2631
2632                 amdgpu_dm_irq_resume_early(adev);
2633
2634                 for (i = 0; i < dc_state->stream_count; i++) {
2635                         dc_state->streams[i]->mode_changed = true;
2636                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2637                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2638                                         = 0xffffffff;
2639                         }
2640                 }
2641
2642                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2643
2644                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2645
2646                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2647
2648                 dc_release_state(dm->cached_dc_state);
2649                 dm->cached_dc_state = NULL;
2650
2651                 amdgpu_dm_irq_resume_late(adev);
2652
2653                 mutex_unlock(&dm->dc_lock);
2654
2655                 return 0;
2656         }
2657         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2658         dc_release_state(dm_state->context);
2659         dm_state->context = dc_create_state(dm->dc);
2660         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2661         dc_resource_state_construct(dm->dc, dm_state->context);
2662
2663         /* Re-enable outbox interrupts for DPIA. */
2664         if (dc_enable_dmub_notifications(adev->dm.dc))
2665                 amdgpu_dm_outbox_init(adev);
2666
2667         /* Before powering on DC we need to re-initialize DMUB. */
2668         dm_dmub_hw_resume(adev);
2669
2670         /* power on hardware */
2671         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2672
2673         /* program HPD filter */
2674         dc_resume(dm->dc);
2675
2676         /*
2677          * early enable HPD Rx IRQ, should be done before set mode as short
2678          * pulse interrupts are used for MST
2679          */
2680         amdgpu_dm_irq_resume_early(adev);
2681
        /* On resume we need to rewrite the MSTM control bits to enable MST */
        s3_handle_mst(ddev, false);

        /* Do detection */
2686         drm_connector_list_iter_begin(ddev, &iter);
2687         drm_for_each_connector_iter(connector, &iter) {
2688                 aconnector = to_amdgpu_dm_connector(connector);
2689
2690                 /*
2691                  * this is the case when traversing through already created
2692                  * MST connectors, should be skipped
2693                  */
2694                 if (aconnector->dc_link &&
2695                     aconnector->dc_link->type == dc_connection_mst_branch)
2696                         continue;
2697
2698                 mutex_lock(&aconnector->hpd_lock);
2699                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2700                         DRM_ERROR("KMS: Failed to detect connector\n");
2701
2702                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2703                         emulated_link_detect(aconnector->dc_link);
2704                 else
2705                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2706
2707                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708                         aconnector->fake_enable = false;
2709
2710                 if (aconnector->dc_sink)
2711                         dc_sink_release(aconnector->dc_sink);
2712                 aconnector->dc_sink = NULL;
2713                 amdgpu_dm_update_connector_after_detect(aconnector);
2714                 mutex_unlock(&aconnector->hpd_lock);
2715         }
2716         drm_connector_list_iter_end(&iter);
2717
2718         /* Force mode set in atomic commit */
2719         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720                 new_crtc_state->active_changed = true;
2721
2722         /*
2723          * atomic_check is expected to create the dc states. We need to release
2724          * them here, since they were duplicated as part of the suspend
2725          * procedure.
2726          */
2727         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729                 if (dm_new_crtc_state->stream) {
2730                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731                         dc_stream_release(dm_new_crtc_state->stream);
2732                         dm_new_crtc_state->stream = NULL;
2733                 }
2734         }
2735
2736         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738                 if (dm_new_plane_state->dc_state) {
2739                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740                         dc_plane_state_release(dm_new_plane_state->dc_state);
2741                         dm_new_plane_state->dc_state = NULL;
2742                 }
2743         }
2744
2745         drm_atomic_helper_resume(ddev, dm->cached_state);
2746
2747         dm->cached_state = NULL;
2748
2749         amdgpu_dm_irq_resume_late(adev);
2750
2751         amdgpu_dm_smu_write_watermarks_table(adev);
2752
2753         return 0;
2754 }
2755
2756 /**
2757  * DOC: DM Lifecycle
2758  *
2759  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2760  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2761  * the base driver's device list to be initialized and torn down accordingly.
2762  *
2763  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2764  */
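/*
 * As a rough sketch (the literal registration site lives in the SoC-specific
 * init code, e.g. soc15.c), the base driver picks up these hooks by adding
 * the block to its IP list:
 *
 *     amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which dm_early_init(), dm_hw_init(), dm_suspend()/dm_resume(), etc.
 * are invoked through &struct amd_ip_funcs at the matching lifecycle stages.
 */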
2765
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2767         .name = "dm",
2768         .early_init = dm_early_init,
2769         .late_init = dm_late_init,
2770         .sw_init = dm_sw_init,
2771         .sw_fini = dm_sw_fini,
2772         .early_fini = amdgpu_dm_early_fini,
2773         .hw_init = dm_hw_init,
2774         .hw_fini = dm_hw_fini,
2775         .suspend = dm_suspend,
2776         .resume = dm_resume,
2777         .is_idle = dm_is_idle,
2778         .wait_for_idle = dm_wait_for_idle,
2779         .check_soft_reset = dm_check_soft_reset,
2780         .soft_reset = dm_soft_reset,
2781         .set_clockgating_state = dm_set_clockgating_state,
2782         .set_powergating_state = dm_set_powergating_state,
2783 };
2784
2785 const struct amdgpu_ip_block_version dm_ip_block =
2786 {
2787         .type = AMD_IP_BLOCK_TYPE_DCE,
2788         .major = 1,
2789         .minor = 0,
2790         .rev = 0,
2791         .funcs = &amdgpu_dm_funcs,
2792 };
2793
2794
2795 /**
2796  * DOC: atomic
2797  *
2798  * *WIP*
2799  */
2800
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802         .fb_create = amdgpu_display_user_framebuffer_create,
2803         .get_format_info = amd_get_format_info,
2804         .output_poll_changed = drm_fb_helper_output_poll_changed,
2805         .atomic_check = amdgpu_dm_atomic_check,
2806         .atomic_commit = drm_atomic_helper_commit,
2807 };
2808
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 };
2812
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2814 {
2815         u32 max_cll, min_cll, max, min, q, r;
2816         struct amdgpu_dm_backlight_caps *caps;
2817         struct amdgpu_display_manager *dm;
2818         struct drm_connector *conn_base;
2819         struct amdgpu_device *adev;
2820         struct dc_link *link = NULL;
2821         static const u8 pre_computed_values[] = {
2822                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824         int i;
2825
2826         if (!aconnector || !aconnector->dc_link)
2827                 return;
2828
2829         link = aconnector->dc_link;
2830         if (link->connector_signal != SIGNAL_TYPE_EDP)
2831                 return;
2832
2833         conn_base = &aconnector->base;
2834         adev = drm_to_adev(conn_base->dev);
2835         dm = &adev->dm;
2836         for (i = 0; i < dm->num_of_edps; i++) {
2837                 if (link == dm->backlight_link[i])
2838                         break;
2839         }
2840         if (i >= dm->num_of_edps)
2841                 return;
2842         caps = &dm->backlight_caps[i];
2843         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844         caps->aux_support = false;
2845         max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2846         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2847
2848         if (caps->ext_caps->bits.oled == 1 /*||
2849             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2850             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2851                 caps->aux_support = true;
2852
2853         if (amdgpu_backlight == 0)
2854                 caps->aux_support = false;
2855         else if (amdgpu_backlight == 1)
2856                 caps->aux_support = true;
2857
2858         /* From the specification (CTA-861-G), for calculating the maximum
2859          * luminance we need to use:
2860          *      Luminance = 50*2**(CV/32)
2861          * Where CV is a one-byte value.
2862          * Evaluating this expression directly would require floating-point
2863          * precision; to avoid that complexity, we take advantage of the fact
2864          * that CV is divided by a constant. From Euclid's division algorithm,
2865          * CV can be written as CV = 32*q + r. Substituting this into the
2866          * Luminance expression gives 50*(2**q)*(2**(r/32)), so we only need
2867          * to pre-compute the values of 50*(2**(r/32)) for r in 0..31. They
2868          * were generated with the following Ruby line:
2869          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2870          * The results of the above expressions can be verified at
2871          * pre_computed_values.
2872          */
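	/*
	 * Worked example (illustrative only): for max_cll = 65 we have
	 * q = 65 >> 5 = 2 and r = 65 % 32 = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * close to the exact 50*2**(65/32) ~= 204.4.
	 */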
2873         q = max_cll >> 5;
2874         r = max_cll % 32;
2875         max = (1 << q) * pre_computed_values[r];
2876
2877         // min luminance: maxLum * (CV/255)^2 / 100, computed in integer math
2878         // so the squared (CV/255) term is not rounded down to zero early
2879         min = DIV_ROUND_CLOSEST(max * min_cll * min_cll, 255 * 255 * 100);
2880
2881         caps->aux_max_input_signal = max;
2882         caps->aux_min_input_signal = min;
2883 }
2884
2885 void amdgpu_dm_update_connector_after_detect(
2886                 struct amdgpu_dm_connector *aconnector)
2887 {
2888         struct drm_connector *connector = &aconnector->base;
2889         struct drm_device *dev = connector->dev;
2890         struct dc_sink *sink;
2891
2892         /* MST handled by drm_mst framework */
2893         if (aconnector->mst_mgr.mst_state)
2894                 return;
2895
2896         sink = aconnector->dc_link->local_sink;
2897         if (sink)
2898                 dc_sink_retain(sink);
2899
2900         /*
2901          * An EDID-managed connector gets its first update only in the mode_valid
2902          * hook; the connector sink is then set to either a fake or a physical
2903          * sink, depending on the link status. Skip if already done during boot.
2904          */
2905         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906                         && aconnector->dc_em_sink) {
2907
2908                 /*
2909                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2910                  * fake a stream, because the connector's sink is set to NULL on resume.
2911                  */
2912                 mutex_lock(&dev->mode_config.mutex);
2913
2914                 if (sink) {
2915                         if (aconnector->dc_sink) {
2916                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2917                                 /*
2918                                  * The retain and release below bump the sink's
2919                                  * refcount, because the link no longer points to it
2920                                  * after disconnect; otherwise the next CRTC-to-connector
2921                                  * reshuffle by the UMD would cause an unwanted dc_sink release.
2922                                  */
2923                                 dc_sink_release(aconnector->dc_sink);
2924                         }
2925                         aconnector->dc_sink = sink;
2926                         dc_sink_retain(aconnector->dc_sink);
2927                         amdgpu_dm_update_freesync_caps(connector,
2928                                         aconnector->edid);
2929                 } else {
2930                         amdgpu_dm_update_freesync_caps(connector, NULL);
2931                         if (!aconnector->dc_sink) {
2932                                 aconnector->dc_sink = aconnector->dc_em_sink;
2933                                 dc_sink_retain(aconnector->dc_sink);
2934                         }
2935                 }
2936
2937                 mutex_unlock(&dev->mode_config.mutex);
2938
2939                 if (sink)
2940                         dc_sink_release(sink);
2941                 return;
2942         }
2943
2944         /*
2945          * TODO: temporary guard until a proper fix is found.
2946          * If this sink is an MST sink, we should not do anything.
2947          */
2948         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949                 dc_sink_release(sink);
2950                 return;
2951         }
2952
2953         if (aconnector->dc_sink == sink) {
2954                 /*
2955                  * We got a DP short pulse (link loss, DP CTS, etc.);
2956                  * there is nothing to do.
2957                  */
2958                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959                                 aconnector->connector_id);
2960                 if (sink)
2961                         dc_sink_release(sink);
2962                 return;
2963         }
2964
2965         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966                 aconnector->connector_id, aconnector->dc_sink, sink);
2967
2968         mutex_lock(&dev->mode_config.mutex);
2969
2970         /*
2971          * 1. Update status of the drm connector
2972          * 2. Send an event and let userspace tell us what to do
2973          */
2974         if (sink) {
2975                 /*
2976                  * TODO: check if we still need the S3 mode update workaround.
2977                  * If yes, put it here.
2978                  */
2979                 if (aconnector->dc_sink) {
2980                         amdgpu_dm_update_freesync_caps(connector, NULL);
2981                         dc_sink_release(aconnector->dc_sink);
2982                 }
2983
2984                 aconnector->dc_sink = sink;
2985                 dc_sink_retain(aconnector->dc_sink);
2986                 if (sink->dc_edid.length == 0) {
2987                         aconnector->edid = NULL;
2988                         if (aconnector->dc_link->aux_mode) {
2989                                 drm_dp_cec_unset_edid(
2990                                         &aconnector->dm_dp_aux.aux);
2991                         }
2992                 } else {
2993                         aconnector->edid =
2994                                 (struct edid *)sink->dc_edid.raw_edid;
2995
2996                         if (aconnector->dc_link->aux_mode)
2997                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2998                                                     aconnector->edid);
2999                 }
3000
3001                 drm_connector_update_edid_property(connector, aconnector->edid);
3002                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003                 update_connector_ext_caps(aconnector);
3004         } else {
3005                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006                 amdgpu_dm_update_freesync_caps(connector, NULL);
3007                 drm_connector_update_edid_property(connector, NULL);
3008                 aconnector->num_modes = 0;
3009                 dc_sink_release(aconnector->dc_sink);
3010                 aconnector->dc_sink = NULL;
3011                 aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3014                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3016 #endif
3017         }
3018
3019         mutex_unlock(&dev->mode_config.mutex);
3020
3021         update_subconnector_property(aconnector);
3022
3023         if (sink)
3024                 dc_sink_release(sink);
3025 }
3026
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3028 {
3029         struct drm_connector *connector = &aconnector->base;
3030         struct drm_device *dev = connector->dev;
3031         enum dc_connection_type new_connection_type = dc_connection_none;
3032         struct amdgpu_device *adev = drm_to_adev(dev);
3033         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3034         struct dm_crtc_state *dm_crtc_state = NULL;
3035
3036         if (adev->dm.disable_hpd_irq)
3037                 return;
3038
3039         if (dm_con_state->base.state && dm_con_state->base.crtc)
3040                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3041                                         dm_con_state->base.state,
3042                                         dm_con_state->base.crtc));
3043         /*
3044          * In case of failure, or for MST, there is no need to update the connector
3045          * status or notify the OS, since MST handles this in its own context.
3046          */
3047         mutex_lock(&aconnector->hpd_lock);
3048
3049 #ifdef CONFIG_DRM_AMD_DC_HDCP
3050         if (adev->dm.hdcp_workqueue) {
3051                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3052                 dm_con_state->update_hdcp = true;
3053         }
3054 #endif
3055         if (aconnector->fake_enable)
3056                 aconnector->fake_enable = false;
3057
3058         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3059                 DRM_ERROR("KMS: Failed to detect connector\n");
3060
3061         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3062                 emulated_link_detect(aconnector->dc_link);
3063
3064                 drm_modeset_lock_all(dev);
3065                 dm_restore_drm_connector_state(dev, connector);
3066                 drm_modeset_unlock_all(dev);
3067
3068                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3069                         drm_kms_helper_connector_hotplug_event(connector);
3070
3071         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3072                 if (new_connection_type == dc_connection_none &&
3073                     aconnector->dc_link->type == dc_connection_none &&
3074                     dm_crtc_state)
3075                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3076
3077                 amdgpu_dm_update_connector_after_detect(aconnector);
3078
3079                 drm_modeset_lock_all(dev);
3080                 dm_restore_drm_connector_state(dev, connector);
3081                 drm_modeset_unlock_all(dev);
3082
3083                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3084                         drm_kms_helper_connector_hotplug_event(connector);
3085         }
3086         mutex_unlock(&aconnector->hpd_lock);
3087
3088 }
3089
3090 static void handle_hpd_irq(void *param)
3091 {
3092         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3093
3094         handle_hpd_irq_helper(aconnector);
3095
3096 }
3097
3098 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3099 {
3100         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3101         uint8_t dret;
3102         bool new_irq_handled = false;
3103         int dpcd_addr;
3104         int dpcd_bytes_to_read;
3105
3106         const int max_process_count = 30;
3107         int process_count = 0;
3108
3109         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3110
3111         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3112                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3113                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3114                 dpcd_addr = DP_SINK_COUNT;
3115         } else {
3116                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3117                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3118                 dpcd_addr = DP_SINK_COUNT_ESI;
3119         }
3120
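	/*
	 * With the standard DPCD layout this reads 2 bytes (0x200-0x201) on
	 * pre-DP 1.2 devices and 4 bytes (0x2002-0x2005) otherwise.
	 */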
3121         dret = drm_dp_dpcd_read(
3122                 &aconnector->dm_dp_aux.aux,
3123                 dpcd_addr,
3124                 esi,
3125                 dpcd_bytes_to_read);
3126
3127         while (dret == dpcd_bytes_to_read &&
3128                 process_count < max_process_count) {
3129                 uint8_t retry;
3130                 dret = 0;
3131
3132                 process_count++;
3133
3134                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3135                 /* handle HPD short pulse irq */
3136                 if (aconnector->mst_mgr.mst_state)
3137                         drm_dp_mst_hpd_irq(
3138                                 &aconnector->mst_mgr,
3139                                 esi,
3140                                 &new_irq_handled);
3141
3142                 if (new_irq_handled) {
3143                         /* ACK at DPCD to notify downstream */
3144                         const int ack_dpcd_bytes_to_write =
3145                                 dpcd_bytes_to_read - 1;
3146
3147                         for (retry = 0; retry < 3; retry++) {
3148                                 uint8_t wret;
3149
3150                                 wret = drm_dp_dpcd_write(
3151                                         &aconnector->dm_dp_aux.aux,
3152                                         dpcd_addr + 1,
3153                                         &esi[1],
3154                                         ack_dpcd_bytes_to_write);
3155                                 if (wret == ack_dpcd_bytes_to_write)
3156                                         break;
3157                         }
3158
3159                         /* check if there is a new irq to be handled */
3160                         dret = drm_dp_dpcd_read(
3161                                 &aconnector->dm_dp_aux.aux,
3162                                 dpcd_addr,
3163                                 esi,
3164                                 dpcd_bytes_to_read);
3165
3166                         new_irq_handled = false;
3167                 } else {
3168                         break;
3169                 }
3170         }
3171
3172         if (process_count == max_process_count)
3173                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3174 }
3175
3176 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3177                                                         union hpd_irq_data hpd_irq_data)
3178 {
3179         struct hpd_rx_irq_offload_work *offload_work =
3180                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3181
3182         if (!offload_work) {
3183                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3184                 return;
3185         }
3186
3187         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3188         offload_work->data = hpd_irq_data;
3189         offload_work->offload_wq = offload_wq;
3190
3191         queue_work(offload_wq->wq, &offload_work->work);
3192         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3193 }
3194
3195 static void handle_hpd_rx_irq(void *param)
3196 {
3197         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3198         struct drm_connector *connector = &aconnector->base;
3199         struct drm_device *dev = connector->dev;
3200         struct dc_link *dc_link = aconnector->dc_link;
3201         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3202         bool result = false;
3203         enum dc_connection_type new_connection_type = dc_connection_none;
3204         struct amdgpu_device *adev = drm_to_adev(dev);
3205         union hpd_irq_data hpd_irq_data;
3206         bool link_loss = false;
3207         bool has_left_work = false;
3208         int idx = aconnector->base.index;
3209         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3210
3211         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3212
3213         if (adev->dm.disable_hpd_irq)
3214                 return;
3215
3216         /*
3217          * TODO: Temporarily take a mutex so the HPD interrupt does not
3218          * race on the GPIO; once an i2c helper is implemented, this
3219          * mutex should be retired.
3220          */
3221         mutex_lock(&aconnector->hpd_lock);
3222
3223         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3224                                                 &link_loss, true, &has_left_work);
3225
3226         if (!has_left_work)
3227                 goto out;
3228
3229         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3230                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3231                 goto out;
3232         }
3233
3234         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3235                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3236                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3237                         dm_handle_mst_sideband_msg(aconnector);
3238                         goto out;
3239                 }
3240
3241                 if (link_loss) {
3242                         bool skip = false;
3243
3244                         spin_lock(&offload_wq->offload_lock);
3245                         skip = offload_wq->is_handling_link_loss;
3246
3247                         if (!skip)
3248                                 offload_wq->is_handling_link_loss = true;
3249
3250                         spin_unlock(&offload_wq->offload_lock);
3251
3252                         if (!skip)
3253                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254
3255                         goto out;
3256                 }
3257         }
3258
3259 out:
3260         if (result && !is_mst_root_connector) {
3261                 /* Downstream Port status changed. */
3262                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3263                         DRM_ERROR("KMS: Failed to detect connector\n");
3264
3265                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3266                         emulated_link_detect(dc_link);
3267
3268                         if (aconnector->fake_enable)
3269                                 aconnector->fake_enable = false;
3270
3271                         amdgpu_dm_update_connector_after_detect(aconnector);
3272
3273
3274                         drm_modeset_lock_all(dev);
3275                         dm_restore_drm_connector_state(dev, connector);
3276                         drm_modeset_unlock_all(dev);
3277
3278                         drm_kms_helper_connector_hotplug_event(connector);
3279                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3280
3281                         if (aconnector->fake_enable)
3282                                 aconnector->fake_enable = false;
3283
3284                         amdgpu_dm_update_connector_after_detect(aconnector);
3285
3286
3287                         drm_modeset_lock_all(dev);
3288                         dm_restore_drm_connector_state(dev, connector);
3289                         drm_modeset_unlock_all(dev);
3290
3291                         drm_kms_helper_connector_hotplug_event(connector);
3292                 }
3293         }
3294 #ifdef CONFIG_DRM_AMD_DC_HDCP
3295         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3296                 if (adev->dm.hdcp_workqueue)
3297                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3298         }
3299 #endif
3300
3301         if (dc_link->type != dc_connection_mst_branch)
3302                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3303
3304         mutex_unlock(&aconnector->hpd_lock);
3305 }
3306
3307 static void register_hpd_handlers(struct amdgpu_device *adev)
3308 {
3309         struct drm_device *dev = adev_to_drm(adev);
3310         struct drm_connector *connector;
3311         struct amdgpu_dm_connector *aconnector;
3312         const struct dc_link *dc_link;
3313         struct dc_interrupt_params int_params = {0};
3314
3315         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3316         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3317
3318         list_for_each_entry(connector,
3319                         &dev->mode_config.connector_list, head) {
3320
3321                 aconnector = to_amdgpu_dm_connector(connector);
3322                 dc_link = aconnector->dc_link;
3323
3324                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3325                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3326                         int_params.irq_source = dc_link->irq_source_hpd;
3327
3328                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3329                                         handle_hpd_irq,
3330                                         (void *) aconnector);
3331                 }
3332
3333                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3334
3335                         /* Also register for DP short pulse (hpd_rx). */
3336                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3337                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3338
3339                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3340                                         handle_hpd_rx_irq,
3341                                         (void *) aconnector);
3342
3343                         if (adev->dm.hpd_rx_offload_wq)
3344                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3345                                         aconnector;
3346                 }
3347         }
3348 }
3349
3350 #if defined(CONFIG_DRM_AMD_DC_SI)
3351 /* Register IRQ sources and initialize IRQ callbacks */
3352 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3353 {
3354         struct dc *dc = adev->dm.dc;
3355         struct common_irq_params *c_irq_params;
3356         struct dc_interrupt_params int_params = {0};
3357         int r;
3358         int i;
3359         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3360
3361         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3362         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3363
3364         /*
3365          * Actions of amdgpu_irq_add_id():
3366          * 1. Register a set() function with base driver.
3367          *    Base driver will call set() function to enable/disable an
3368          *    interrupt in DC hardware.
3369          * 2. Register amdgpu_dm_irq_handler().
3370          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3371          *    coming from DC hardware.
3372          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3373          *    for acknowledging and handling. */
3374
3375         /* Use VBLANK interrupt */
3376         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3377                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3378                 if (r) {
3379                         DRM_ERROR("Failed to add crtc irq id!\n");
3380                         return r;
3381                 }
3382
3383                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3384                 int_params.irq_source =
3385                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3386
3387                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3388
3389                 c_irq_params->adev = adev;
3390                 c_irq_params->irq_src = int_params.irq_source;
3391
3392                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3393                                 dm_crtc_high_irq, c_irq_params);
3394         }
3395
3396         /* Use GRPH_PFLIP interrupt */
3397         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3398                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3399                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3400                 if (r) {
3401                         DRM_ERROR("Failed to add page flip irq id!\n");
3402                         return r;
3403                 }
3404
3405                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3406                 int_params.irq_source =
3407                         dc_interrupt_to_irq_source(dc, i, 0);
3408
3409                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3410
3411                 c_irq_params->adev = adev;
3412                 c_irq_params->irq_src = int_params.irq_source;
3413
3414                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3415                                 dm_pflip_high_irq, c_irq_params);
3416
3417         }
3418
3419         /* HPD */
3420         r = amdgpu_irq_add_id(adev, client_id,
3421                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3422         if (r) {
3423                 DRM_ERROR("Failed to add hpd irq id!\n");
3424                 return r;
3425         }
3426
3427         register_hpd_handlers(adev);
3428
3429         return 0;
3430 }
3431 #endif
3432
3433 /* Register IRQ sources and initialize IRQ callbacks */
3434 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3435 {
3436         struct dc *dc = adev->dm.dc;
3437         struct common_irq_params *c_irq_params;
3438         struct dc_interrupt_params int_params = {0};
3439         int r;
3440         int i;
3441         unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3442
3443         if (adev->family >= AMDGPU_FAMILY_AI)
3444                 client_id = SOC15_IH_CLIENTID_DCE;
3445
3446         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3447         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3448
3449         /*
3450          * Actions of amdgpu_irq_add_id():
3451          * 1. Register a set() function with base driver.
3452          *    Base driver will call set() function to enable/disable an
3453          *    interrupt in DC hardware.
3454          * 2. Register amdgpu_dm_irq_handler().
3455          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3456          *    coming from DC hardware.
3457          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3458          *    for acknowledging and handling. */
3459
3460         /* Use VBLANK interrupt */
3461         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3462                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3463                 if (r) {
3464                         DRM_ERROR("Failed to add crtc irq id!\n");
3465                         return r;
3466                 }
3467
3468                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469                 int_params.irq_source =
3470                         dc_interrupt_to_irq_source(dc, i, 0);
3471
3472                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3473
3474                 c_irq_params->adev = adev;
3475                 c_irq_params->irq_src = int_params.irq_source;
3476
3477                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478                                 dm_crtc_high_irq, c_irq_params);
3479         }
3480
3481         /* Use VUPDATE interrupt */
3482         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3483                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3484                 if (r) {
3485                         DRM_ERROR("Failed to add vupdate irq id!\n");
3486                         return r;
3487                 }
3488
3489                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490                 int_params.irq_source =
3491                         dc_interrupt_to_irq_source(dc, i, 0);
3492
3493                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3494
3495                 c_irq_params->adev = adev;
3496                 c_irq_params->irq_src = int_params.irq_source;
3497
3498                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499                                 dm_vupdate_high_irq, c_irq_params);
3500         }
3501
3502         /* Use GRPH_PFLIP interrupt */
3503         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3504                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3505                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3506                 if (r) {
3507                         DRM_ERROR("Failed to add page flip irq id!\n");
3508                         return r;
3509                 }
3510
3511                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3512                 int_params.irq_source =
3513                         dc_interrupt_to_irq_source(dc, i, 0);
3514
3515                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3516
3517                 c_irq_params->adev = adev;
3518                 c_irq_params->irq_src = int_params.irq_source;
3519
3520                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3521                                 dm_pflip_high_irq, c_irq_params);
3522
3523         }
3524
3525         /* HPD */
3526         r = amdgpu_irq_add_id(adev, client_id,
3527                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3528         if (r) {
3529                 DRM_ERROR("Failed to add hpd irq id!\n");
3530                 return r;
3531         }
3532
3533         register_hpd_handlers(adev);
3534
3535         return 0;
3536 }
3537
3538 /* Register IRQ sources and initialize IRQ callbacks */
3539 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3540 {
3541         struct dc *dc = adev->dm.dc;
3542         struct common_irq_params *c_irq_params;
3543         struct dc_interrupt_params int_params = {0};
3544         int r;
3545         int i;
3546 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3547         static const unsigned int vrtl_int_srcid[] = {
3548                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3549                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3550                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3551                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3552                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3553                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3554         };
3555 #endif
3556
3557         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3558         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3559
3560         /*
3561          * Actions of amdgpu_irq_add_id():
3562          * 1. Register a set() function with base driver.
3563          *    Base driver will call set() function to enable/disable an
3564          *    interrupt in DC hardware.
3565          * 2. Register amdgpu_dm_irq_handler().
3566          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3567          *    coming from DC hardware.
3568          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3569          *    for acknowledging and handling.
3570          */
3571
3572         /* Use VSTARTUP interrupt */
3573         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3574                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3575                         i++) {
3576                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3577
3578                 if (r) {
3579                         DRM_ERROR("Failed to add crtc irq id!\n");
3580                         return r;
3581                 }
3582
3583                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3584                 int_params.irq_source =
3585                         dc_interrupt_to_irq_source(dc, i, 0);
3586
3587                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3588
3589                 c_irq_params->adev = adev;
3590                 c_irq_params->irq_src = int_params.irq_source;
3591
3592                 amdgpu_dm_irq_register_interrupt(
3593                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3594         }
3595
3596         /* Use otg vertical line interrupt */
3597 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3598         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3599                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3600                                 vrtl_int_srcid[i], &adev->vline0_irq);
3601
3602                 if (r) {
3603                         DRM_ERROR("Failed to add vline0 irq id!\n");
3604                         return r;
3605                 }
3606
3607                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608                 int_params.irq_source =
3609                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3610
3611                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3612                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3613                         break;
3614                 }
3615
3616                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3617                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3618
3619                 c_irq_params->adev = adev;
3620                 c_irq_params->irq_src = int_params.irq_source;
3621
3622                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3623                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3624         }
3625 #endif
3626
3627         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3628          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3629          * to trigger at end of each vblank, regardless of state of the lock,
3630          * matching DCE behaviour.
3631          */
3632         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3633              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3634              i++) {
3635                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3636
3637                 if (r) {
3638                         DRM_ERROR("Failed to add vupdate irq id!\n");
3639                         return r;
3640                 }
3641
3642                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3643                 int_params.irq_source =
3644                         dc_interrupt_to_irq_source(dc, i, 0);
3645
3646                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3647
3648                 c_irq_params->adev = adev;
3649                 c_irq_params->irq_src = int_params.irq_source;
3650
3651                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3652                                 dm_vupdate_high_irq, c_irq_params);
3653         }
3654
3655         /* Use GRPH_PFLIP interrupt */
3656         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3657                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3658                         i++) {
3659                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3660                 if (r) {
3661                         DRM_ERROR("Failed to add page flip irq id!\n");
3662                         return r;
3663                 }
3664
3665                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3666                 int_params.irq_source =
3667                         dc_interrupt_to_irq_source(dc, i, 0);
3668
3669                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3670
3671                 c_irq_params->adev = adev;
3672                 c_irq_params->irq_src = int_params.irq_source;
3673
3674                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3675                                 dm_pflip_high_irq, c_irq_params);
3676
3677         }
3678
3679         /* HPD */
3680         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3681                         &adev->hpd_irq);
3682         if (r) {
3683                 DRM_ERROR("Failed to add hpd irq id!\n");
3684                 return r;
3685         }
3686
3687         register_hpd_handlers(adev);
3688
3689         return 0;
3690 }
3691 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3692 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3693 {
3694         struct dc *dc = adev->dm.dc;
3695         struct common_irq_params *c_irq_params;
3696         struct dc_interrupt_params int_params = {0};
3697         int r, i;
3698
3699         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3700         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3701
3702         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3703                         &adev->dmub_outbox_irq);
3704         if (r) {
3705                 DRM_ERROR("Failed to add outbox irq id!\n");
3706                 return r;
3707         }
3708
3709         if (dc->ctx->dmub_srv) {
3710                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3711                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3712                 int_params.irq_source =
3713                         dc_interrupt_to_irq_source(dc, i, 0);
3714
3715                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3716
3717                 c_irq_params->adev = adev;
3718                 c_irq_params->irq_src = int_params.irq_source;
3719
3720                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3721                                 dm_dmub_outbox1_low_irq, c_irq_params);
3722         }
3723
3724         return 0;
3725 }
3726
3727 /*
3728  * Acquires the lock for the atomic state object and returns
3729  * the new atomic state.
3730  *
3731  * This should only be called during atomic check.
3732  */
3733 int dm_atomic_get_state(struct drm_atomic_state *state,
3734                         struct dm_atomic_state **dm_state)
3735 {
3736         struct drm_device *dev = state->dev;
3737         struct amdgpu_device *adev = drm_to_adev(dev);
3738         struct amdgpu_display_manager *dm = &adev->dm;
3739         struct drm_private_state *priv_state;
3740
3741         if (*dm_state)
3742                 return 0;
3743
3744         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745         if (IS_ERR(priv_state))
3746                 return PTR_ERR(priv_state);
3747
3748         *dm_state = to_dm_atomic_state(priv_state);
3749
3750         return 0;
3751 }
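/*
 * Typical usage during atomic check (a sketch; the variable names are
 * illustrative):
 *
 *     struct dm_atomic_state *dm_state = NULL;
 *     int ret = dm_atomic_get_state(state, &dm_state);
 *
 *     if (ret)
 *             return ret;
 *     // dm_state->context can now be inspected or modified
 */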
3752
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3755 {
3756         struct drm_device *dev = state->dev;
3757         struct amdgpu_device *adev = drm_to_adev(dev);
3758         struct amdgpu_display_manager *dm = &adev->dm;
3759         struct drm_private_obj *obj;
3760         struct drm_private_state *new_obj_state;
3761         int i;
3762
3763         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764                 if (obj->funcs == dm->atomic_obj.funcs)
3765                         return to_dm_atomic_state(new_obj_state);
3766         }
3767
3768         return NULL;
3769 }
3770
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3773 {
3774         struct dm_atomic_state *old_state, *new_state;
3775
3776         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3777         if (!new_state)
3778                 return NULL;
3779
3780         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3781
3782         old_state = to_dm_atomic_state(obj->state);
3783
3784         if (old_state && old_state->context)
3785                 new_state->context = dc_copy_state(old_state->context);
3786
3787         if (!new_state->context) {
3788                 kfree(new_state);
3789                 return NULL;
3790         }
3791
3792         return &new_state->base;
3793 }
3794
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796                                     struct drm_private_state *state)
3797 {
3798         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3799
3800         if (dm_state && dm_state->context)
3801                 dc_release_state(dm_state->context);
3802
3803         kfree(dm_state);
3804 }
3805
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807         .atomic_duplicate_state = dm_atomic_duplicate_state,
3808         .atomic_destroy_state = dm_atomic_destroy_state,
3809 };
3810
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3812 {
3813         struct dm_atomic_state *state;
3814         int r;
3815
3816         adev->mode_info.mode_config_initialized = true;
3817
3818         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3820
3821         adev_to_drm(adev)->mode_config.max_width = 16384;
3822         adev_to_drm(adev)->mode_config.max_height = 16384;
3823
3824         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3826         /* indicates support for immediate flip */
3827         adev_to_drm(adev)->mode_config.async_page_flip = true;
3828
3829         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3830
3831         state = kzalloc(sizeof(*state), GFP_KERNEL);
3832         if (!state)
3833                 return -ENOMEM;
3834
3835         state->context = dc_create_state(adev->dm.dc);
3836         if (!state->context) {
3837                 kfree(state);
3838                 return -ENOMEM;
3839         }
3840
3841         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3842
3843         drm_atomic_private_obj_init(adev_to_drm(adev),
3844                                     &adev->dm.atomic_obj,
3845                                     &state->base,
3846                                     &dm_atomic_state_funcs);
3847
3848         r = amdgpu_display_modeset_create_props(adev);
3849         if (r) {
3850                 dc_release_state(state->context);
3851                 kfree(state);
3852                 return r;
3853         }
3854
3855         r = amdgpu_dm_audio_init(adev);
3856         if (r) {
3857                 dc_release_state(state->context);
3858                 kfree(state);
3859                 return r;
3860         }
3861
3862         return 0;
3863 }
3864
3865 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3866 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3867 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3868
3869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3870         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3871
3872 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3873                                             int bl_idx)
3874 {
3875 #if defined(CONFIG_ACPI)
3876         struct amdgpu_dm_backlight_caps caps;
3877
3878         memset(&caps, 0, sizeof(caps));
3879
3880         if (dm->backlight_caps[bl_idx].caps_valid)
3881                 return;
3882
3883         amdgpu_acpi_get_backlight_caps(&caps);
3884         if (caps.caps_valid) {
3885                 dm->backlight_caps[bl_idx].caps_valid = true;
3886                 if (caps.aux_support)
3887                         return;
3888                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3889                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3890         } else {
3891                 dm->backlight_caps[bl_idx].min_input_signal =
3892                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3893                 dm->backlight_caps[bl_idx].max_input_signal =
3894                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895         }
3896 #else
3897         if (dm->backlight_caps[bl_idx].aux_support)
3898                 return;
3899
3900         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3902 #endif
3903 }
3904
3905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3906                                 unsigned int *min, unsigned int *max)
3907 {
3908         if (!caps)
3909                 return 0;
3910
3911         if (caps->aux_support) {
3912                 // Firmware limits are in nits, DC API wants millinits.
3913                 *max = 1000 * caps->aux_max_input_signal;
3914                 *min = 1000 * caps->aux_min_input_signal;
3915         } else {
3916                 // Firmware limits are 8-bit, PWM control is 16-bit.
3917                 *max = 0x101 * caps->max_input_signal;
3918                 *min = 0x101 * caps->min_input_signal;
3919         }
3920         return 1;
3921 }
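/*
 * Note: the 0x101 factor maps the 8-bit firmware range exactly onto the
 * 16-bit PWM range, since 255 * 0x101 = 65535.
 */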
3922
3923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3924                                         uint32_t brightness)
3925 {
3926         unsigned int min, max;
3927
3928         if (!get_brightness_range(caps, &min, &max))
3929                 return brightness;
3930
3931         // Rescale 0..255 to min..max
3932         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3933                                        AMDGPU_MAX_BL_LEVEL);
3934 }
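/*
 * Illustrative numbers, assuming the default PWM limits defined above
 * (min_input_signal = 12, max_input_signal = 255, so min = 0x101 * 12 = 3084
 * and max = 0x101 * 255 = 65535): user brightness 0 maps to 3084, 255 maps
 * to 65535, and 128 maps to 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432.
 */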
3935
3936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3937                                       uint32_t brightness)
3938 {
3939         unsigned int min, max;
3940
3941         if (!get_brightness_range(caps, &min, &max))
3942                 return brightness;
3943
3944         if (brightness < min)
3945                 return 0;
3946         // Rescale min..max to 0..255
3947         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3948                                  max - min);
3949 }
3950
3951 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3952                                          int bl_idx,
3953                                          u32 user_brightness)
3954 {
3955         struct amdgpu_dm_backlight_caps caps;
3956         struct dc_link *link;
3957         u32 brightness;
3958         bool rc;
3959
3960         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3961         caps = dm->backlight_caps[bl_idx];
3962
3963         dm->brightness[bl_idx] = user_brightness;
3964         /* update scratch register */
3965         if (bl_idx == 0)
3966                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3967         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3968         link = (struct dc_link *)dm->backlight_link[bl_idx];
3969
3970         /* Change brightness based on AUX property */
3971         if (caps.aux_support) {
3972                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3973                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3974                 if (!rc)
3975                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3976         } else {
3977                 rc = dc_link_set_backlight_level(link, brightness, 0);
3978                 if (!rc)
3979                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3980         }
3981
3982         if (rc)
3983                 dm->actual_brightness[bl_idx] = user_brightness;
3984 }
3985
3986 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3987 {
3988         struct amdgpu_display_manager *dm = bl_get_data(bd);
3989         int i;
3990
3991         for (i = 0; i < dm->num_of_edps; i++) {
3992                 if (bd == dm->backlight_dev[i])
3993                         break;
3994         }
3995         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3996                 i = 0;
3997         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3998
3999         return 0;
4000 }
4001
4002 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4003                                          int bl_idx)
4004 {
4005         struct amdgpu_dm_backlight_caps caps;
4006         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4007
4008         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4009         caps = dm->backlight_caps[bl_idx];
4010
4011         if (caps.aux_support) {
4012                 u32 avg, peak;
4013                 bool rc;
4014
4015                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4016                 if (!rc)
4017                         return dm->brightness[bl_idx];
4018                 return convert_brightness_to_user(&caps, avg);
4019         } else {
4020                 int ret = dc_link_get_backlight_level(link);
4021
4022                 if (ret == DC_ERROR_UNEXPECTED)
4023                         return dm->brightness[bl_idx];
4024                 return convert_brightness_to_user(&caps, ret);
4025         }
4026 }
4027
4028 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4029 {
4030         struct amdgpu_display_manager *dm = bl_get_data(bd);
4031         int i;
4032
4033         for (i = 0; i < dm->num_of_edps; i++) {
4034                 if (bd == dm->backlight_dev[i])
4035                         break;
4036         }
4037         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4038                 i = 0;
4039         return amdgpu_dm_backlight_get_level(dm, i);
4040 }
4041
4042 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4043         .options = BL_CORE_SUSPENDRESUME,
4044         .get_brightness = amdgpu_dm_backlight_get_brightness,
4045         .update_status  = amdgpu_dm_backlight_update_status,
4046 };
4047
4048 static void
4049 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4050 {
4051         char bl_name[16];
4052         struct backlight_properties props = { 0 };
4053
4054         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4055         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4056
4057         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4058         props.brightness = AMDGPU_MAX_BL_LEVEL;
4059         props.type = BACKLIGHT_RAW;
4060
4061         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4062                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4063
4064         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4065                                                                        adev_to_drm(dm->adev)->dev,
4066                                                                        dm,
4067                                                                        &amdgpu_dm_backlight_ops,
4068                                                                        &props);
4069
4070         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4071                 DRM_ERROR("DM: Backlight registration failed!\n");
4072         else
4073                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4074 }
4075 #endif
4076
4077 static int initialize_plane(struct amdgpu_display_manager *dm,
4078                             struct amdgpu_mode_info *mode_info, int plane_id,
4079                             enum drm_plane_type plane_type,
4080                             const struct dc_plane_cap *plane_cap)
4081 {
4082         struct drm_plane *plane;
4083         unsigned long possible_crtcs;
4084         int ret = 0;
4085
4086         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4087         if (!plane) {
4088                 DRM_ERROR("KMS: Failed to allocate plane\n");
4089                 return -ENOMEM;
4090         }
4091         plane->type = plane_type;
4092
4093         /*
4094          * HACK: IGT tests expect that the primary plane for a CRTC
4095          * can only have one possible CRTC. Only expose support for
4096          * any CRTC on planes that will not be used as a primary
4097          * plane for a CRTC, i.e. overlay or underlay planes.
4098          */
4099         possible_crtcs = 1 << plane_id;
4100         if (plane_id >= dm->dc->caps.max_streams)
4101                 possible_crtcs = 0xff;
4102
4103         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4104
4105         if (ret) {
4106                 DRM_ERROR("KMS: Failed to initialize plane\n");
4107                 kfree(plane);
4108                 return ret;
4109         }
4110
4111         if (mode_info)
4112                 mode_info->planes[plane_id] = plane;
4113
4114         return ret;
4115 }
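
/*
 * A minimal sketch of the possible_crtcs policy above (illustrative only,
 * not compiled):
 */
#if 0
static unsigned long example_possible_crtcs(int plane_id, int max_streams)
{
	if (plane_id >= max_streams)
		return 0xff;		/* overlay/underlay: any CRTC */
	return 1UL << plane_id;		/* primary: exactly its own CRTC */
}
#endif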
4116
4117
4118 static void register_backlight_device(struct amdgpu_display_manager *dm,
4119                                       struct dc_link *link)
4120 {
4121 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4122         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4123
4124         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4125             link->type != dc_connection_none) {
4126                 /*
4127                  * Even if registration failed, we should continue with
4128                  * DM initialization, because not having a backlight control
4129                  * is better than a black screen.
4130                  */
4131                 if (!dm->backlight_dev[dm->num_of_edps])
4132                         amdgpu_dm_register_backlight_device(dm);
4133
4134                 if (dm->backlight_dev[dm->num_of_edps]) {
4135                         dm->backlight_link[dm->num_of_edps] = link;
4136                         dm->num_of_edps++;
4137                 }
4138         }
4139 #endif
4140 }
4141
4142
4143 /*
4144  * In this architecture, the association
4145  * connector -> encoder -> crtc
4146  * is not really required. The crtc and connector will hold the
4147  * display_index as an abstraction to use with the DAL component.
4148  *
4149  * Returns 0 on success
4150  */
4151 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4152 {
4153         struct amdgpu_display_manager *dm = &adev->dm;
4154         int32_t i;
4155         struct amdgpu_dm_connector *aconnector = NULL;
4156         struct amdgpu_encoder *aencoder = NULL;
4157         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4158         uint32_t link_cnt;
4159         int32_t primary_planes;
4160         enum dc_connection_type new_connection_type = dc_connection_none;
4161         const struct dc_plane_cap *plane;
4162         bool psr_feature_enabled = false;
4163
4164         dm->display_indexes_num = dm->dc->caps.max_streams;
4165         /* Update the actual used number of crtc */
4166         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4167
4168         link_cnt = dm->dc->caps.max_links;
4169         if (amdgpu_dm_mode_config_init(dm->adev)) {
4170                 DRM_ERROR("DM: Failed to initialize mode config\n");
4171                 return -EINVAL;
4172         }
4173
4174         /* There is one primary plane per CRTC */
4175         primary_planes = dm->dc->caps.max_streams;
4176         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4177
4178         /*
4179          * Initialize primary planes, implicit planes for legacy IOCTLS.
4180          * Order is reversed to match iteration order in atomic check.
4181          */
4182         for (i = (primary_planes - 1); i >= 0; i--) {
4183                 plane = &dm->dc->caps.planes[i];
4184
4185                 if (initialize_plane(dm, mode_info, i,
4186                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4187                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4188                         goto fail;
4189                 }
4190         }
4191
4192         /*
4193          * Initialize overlay planes, index starting after primary planes.
4194          * These planes have a higher DRM index than the primary planes since
4195          * they should be considered as having a higher z-order.
4196          * Order is reversed to match iteration order in atomic check.
4197          *
4198          * Only support DCN for now, and only expose one so we don't encourage
4199          * userspace to use up all the pipes.
4200          */
4201         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4202                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4203
4204                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4205                         continue;
4206
4207                 if (!plane->blends_with_above || !plane->blends_with_below)
4208                         continue;
4209
4210                 if (!plane->pixel_format_support.argb8888)
4211                         continue;
4212
4213                 if (initialize_plane(dm, NULL, primary_planes + i,
4214                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4215                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4216                         goto fail;
4217                 }
4218
4219                 /* Only create one overlay plane. */
4220                 break;
4221         }
4222
4223         for (i = 0; i < dm->dc->caps.max_streams; i++)
4224                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4225                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4226                         goto fail;
4227                 }
4228
4229         /* Use Outbox interrupt */
4230         switch (adev->ip_versions[DCE_HWIP][0]) {
4231         case IP_VERSION(3, 0, 0):
4232         case IP_VERSION(3, 1, 2):
4233         case IP_VERSION(3, 1, 3):
4234         case IP_VERSION(3, 1, 5):
4235         case IP_VERSION(3, 1, 6):
4236         case IP_VERSION(2, 1, 0):
4237                 if (register_outbox_irq_handlers(dm->adev)) {
4238                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4239                         goto fail;
4240                 }
4241                 break;
4242         default:
4243                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4244                               adev->ip_versions[DCE_HWIP][0]);
4245         }
4246
4247         /* Determine whether to enable PSR support by default. */
4248         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4249                 switch (adev->ip_versions[DCE_HWIP][0]) {
4250                 case IP_VERSION(3, 1, 2):
4251                 case IP_VERSION(3, 1, 3):
4252                 case IP_VERSION(3, 1, 5):
4253                 case IP_VERSION(3, 1, 6):
4254                         psr_feature_enabled = true;
4255                         break;
4256                 default:
4257                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4258                         break;
4259                 }
4260         }
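
	/*
	 * Illustrative note: on ASICs not listed above, PSR can still be
	 * opted into via the amdgpu.dcfeaturemask module parameter by
	 * setting its PSR bit (DC_PSR_MASK, 0x8 at the time of writing),
	 * e.g. on the kernel command line:
	 *
	 *   amdgpu.dcfeaturemask=0x8
	 */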
4261
4262         /* Disable vblank IRQs aggressively for power-saving. */
4263         adev_to_drm(adev)->vblank_disable_immediate = true;
4264
4265         /* loops over all connectors on the board */
4266         for (i = 0; i < link_cnt; i++) {
4267                 struct dc_link *link = NULL;
4268
4269                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4270                         DRM_ERROR(
4271                                 "KMS: Cannot support more than %d display indexes\n",
4272                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4273                         continue;
4274                 }
4275
4276                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4277                 if (!aconnector)
4278                         goto fail;
4279
4280                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4281                 if (!aencoder)
4282                         goto fail;
4283
4284                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4285                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4286                         goto fail;
4287                 }
4288
4289                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4290                         DRM_ERROR("KMS: Failed to initialize connector\n");
4291                         goto fail;
4292                 }
4293
4294                 link = dc_get_link_at_index(dm->dc, i);
4295
4296                 if (!dc_link_detect_sink(link, &new_connection_type))
4297                         DRM_ERROR("KMS: Failed to detect connector\n");
4298
4299                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4300                         emulated_link_detect(link);
4301                         amdgpu_dm_update_connector_after_detect(aconnector);
4302
4303                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4304                         amdgpu_dm_update_connector_after_detect(aconnector);
4305                         register_backlight_device(dm, link);
4306                         if (dm->num_of_edps)
4307                                 update_connector_ext_caps(aconnector);
4308                         if (psr_feature_enabled)
4309                                 amdgpu_dm_set_psr_caps(link);
4310
4311                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4312                          * PSR is also supported.
4313                          */
4314                         if (link->psr_settings.psr_feature_enabled)
4315                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4316                 }
4317
4319         }
4320
4321         /* Software is initialized. Now we can register interrupt handlers. */
4322         switch (adev->asic_type) {
4323 #if defined(CONFIG_DRM_AMD_DC_SI)
4324         case CHIP_TAHITI:
4325         case CHIP_PITCAIRN:
4326         case CHIP_VERDE:
4327         case CHIP_OLAND:
4328                 if (dce60_register_irq_handlers(dm->adev)) {
4329                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4330                         goto fail;
4331                 }
4332                 break;
4333 #endif
4334         case CHIP_BONAIRE:
4335         case CHIP_HAWAII:
4336         case CHIP_KAVERI:
4337         case CHIP_KABINI:
4338         case CHIP_MULLINS:
4339         case CHIP_TONGA:
4340         case CHIP_FIJI:
4341         case CHIP_CARRIZO:
4342         case CHIP_STONEY:
4343         case CHIP_POLARIS11:
4344         case CHIP_POLARIS10:
4345         case CHIP_POLARIS12:
4346         case CHIP_VEGAM:
4347         case CHIP_VEGA10:
4348         case CHIP_VEGA12:
4349         case CHIP_VEGA20:
4350                 if (dce110_register_irq_handlers(dm->adev)) {
4351                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4352                         goto fail;
4353                 }
4354                 break;
4355         default:
4356                 switch (adev->ip_versions[DCE_HWIP][0]) {
4357                 case IP_VERSION(1, 0, 0):
4358                 case IP_VERSION(1, 0, 1):
4359                 case IP_VERSION(2, 0, 2):
4360                 case IP_VERSION(2, 0, 3):
4361                 case IP_VERSION(2, 0, 0):
4362                 case IP_VERSION(2, 1, 0):
4363                 case IP_VERSION(3, 0, 0):
4364                 case IP_VERSION(3, 0, 2):
4365                 case IP_VERSION(3, 0, 3):
4366                 case IP_VERSION(3, 0, 1):
4367                 case IP_VERSION(3, 1, 2):
4368                 case IP_VERSION(3, 1, 3):
4369                 case IP_VERSION(3, 1, 5):
4370                 case IP_VERSION(3, 1, 6):
4371                         if (dcn10_register_irq_handlers(dm->adev)) {
4372                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4373                                 goto fail;
4374                         }
4375                         break;
4376                 default:
4377                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4378                                         adev->ip_versions[DCE_HWIP][0]);
4379                         goto fail;
4380                 }
4381                 break;
4382         }
4383
4384         return 0;
4385 fail:
4386         kfree(aencoder);
4387         kfree(aconnector);
4388
4389         return -EINVAL;
4390 }
4391
4392 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4393 {
4394         drm_atomic_private_obj_fini(&dm->atomic_obj);
4396 }
4397
4398 /******************************************************************************
4399  * amdgpu_display_funcs functions
4400  *****************************************************************************/
4401
4402 /**
4403  * dm_bandwidth_update - program display watermarks
4404  *
4405  * @adev: amdgpu_device pointer
4406  *
4407  * Calculate and program the display watermarks and line buffer allocation.
4408  */
4409 static void dm_bandwidth_update(struct amdgpu_device *adev)
4410 {
4411         /* TODO: implement later */
4412 }
4413
4414 static const struct amdgpu_display_funcs dm_display_funcs = {
4415         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4416         .vblank_get_counter = dm_vblank_get_counter, /* called unconditionally */
4417         .backlight_set_level = NULL, /* never called for DC */
4418         .backlight_get_level = NULL, /* never called for DC */
4419         .hpd_sense = NULL, /* called unconditionally */
4420         .hpd_set_polarity = NULL, /* called unconditionally */
4421         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4422         .page_flip_get_scanoutpos =
4423                 dm_crtc_get_scanoutpos, /* called unconditionally */
4424         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4425         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4426 };
4427
4428 #if defined(CONFIG_DEBUG_KERNEL_DC)
4429
4430 static ssize_t s3_debug_store(struct device *device,
4431                               struct device_attribute *attr,
4432                               const char *buf,
4433                               size_t count)
4434 {
4435         int ret;
4436         int s3_state;
4437         struct drm_device *drm_dev = dev_get_drvdata(device);
4438         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4439
4440         ret = kstrtoint(buf, 0, &s3_state);
4441
4442         if (ret == 0) {
4443                 if (s3_state) {
4444                         dm_resume(adev);
4445                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4446                 } else
4447                         dm_suspend(adev);
4448         }
4449
4450         return ret == 0 ? count : 0;
4451 }
4452
4453 DEVICE_ATTR_WO(s3_debug);
4454
4455 #endif
4456
4457 static int dm_early_init(void *handle)
4458 {
4459         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4460
4461         switch (adev->asic_type) {
4462 #if defined(CONFIG_DRM_AMD_DC_SI)
4463         case CHIP_TAHITI:
4464         case CHIP_PITCAIRN:
4465         case CHIP_VERDE:
4466                 adev->mode_info.num_crtc = 6;
4467                 adev->mode_info.num_hpd = 6;
4468                 adev->mode_info.num_dig = 6;
4469                 break;
4470         case CHIP_OLAND:
4471                 adev->mode_info.num_crtc = 2;
4472                 adev->mode_info.num_hpd = 2;
4473                 adev->mode_info.num_dig = 2;
4474                 break;
4475 #endif
4476         case CHIP_BONAIRE:
4477         case CHIP_HAWAII:
4478                 adev->mode_info.num_crtc = 6;
4479                 adev->mode_info.num_hpd = 6;
4480                 adev->mode_info.num_dig = 6;
4481                 break;
4482         case CHIP_KAVERI:
4483                 adev->mode_info.num_crtc = 4;
4484                 adev->mode_info.num_hpd = 6;
4485                 adev->mode_info.num_dig = 7;
4486                 break;
4487         case CHIP_KABINI:
4488         case CHIP_MULLINS:
4489                 adev->mode_info.num_crtc = 2;
4490                 adev->mode_info.num_hpd = 6;
4491                 adev->mode_info.num_dig = 6;
4492                 break;
4493         case CHIP_FIJI:
4494         case CHIP_TONGA:
4495                 adev->mode_info.num_crtc = 6;
4496                 adev->mode_info.num_hpd = 6;
4497                 adev->mode_info.num_dig = 7;
4498                 break;
4499         case CHIP_CARRIZO:
4500                 adev->mode_info.num_crtc = 3;
4501                 adev->mode_info.num_hpd = 6;
4502                 adev->mode_info.num_dig = 9;
4503                 break;
4504         case CHIP_STONEY:
4505                 adev->mode_info.num_crtc = 2;
4506                 adev->mode_info.num_hpd = 6;
4507                 adev->mode_info.num_dig = 9;
4508                 break;
4509         case CHIP_POLARIS11:
4510         case CHIP_POLARIS12:
4511                 adev->mode_info.num_crtc = 5;
4512                 adev->mode_info.num_hpd = 5;
4513                 adev->mode_info.num_dig = 5;
4514                 break;
4515         case CHIP_POLARIS10:
4516         case CHIP_VEGAM:
4517                 adev->mode_info.num_crtc = 6;
4518                 adev->mode_info.num_hpd = 6;
4519                 adev->mode_info.num_dig = 6;
4520                 break;
4521         case CHIP_VEGA10:
4522         case CHIP_VEGA12:
4523         case CHIP_VEGA20:
4524                 adev->mode_info.num_crtc = 6;
4525                 adev->mode_info.num_hpd = 6;
4526                 adev->mode_info.num_dig = 6;
4527                 break;
4528         default:
4529
4530                 switch (adev->ip_versions[DCE_HWIP][0]) {
4531                 case IP_VERSION(2, 0, 2):
4532                 case IP_VERSION(3, 0, 0):
4533                         adev->mode_info.num_crtc = 6;
4534                         adev->mode_info.num_hpd = 6;
4535                         adev->mode_info.num_dig = 6;
4536                         break;
4537                 case IP_VERSION(2, 0, 0):
4538                 case IP_VERSION(3, 0, 2):
4539                         adev->mode_info.num_crtc = 5;
4540                         adev->mode_info.num_hpd = 5;
4541                         adev->mode_info.num_dig = 5;
4542                         break;
4543                 case IP_VERSION(2, 0, 3):
4544                 case IP_VERSION(3, 0, 3):
4545                         adev->mode_info.num_crtc = 2;
4546                         adev->mode_info.num_hpd = 2;
4547                         adev->mode_info.num_dig = 2;
4548                         break;
4549                 case IP_VERSION(1, 0, 0):
4550                 case IP_VERSION(1, 0, 1):
4551                 case IP_VERSION(3, 0, 1):
4552                 case IP_VERSION(2, 1, 0):
4553                 case IP_VERSION(3, 1, 2):
4554                 case IP_VERSION(3, 1, 3):
4555                 case IP_VERSION(3, 1, 5):
4556                 case IP_VERSION(3, 1, 6):
4557                         adev->mode_info.num_crtc = 4;
4558                         adev->mode_info.num_hpd = 4;
4559                         adev->mode_info.num_dig = 4;
4560                         break;
4561                 default:
4562                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4563                                         adev->ip_versions[DCE_HWIP][0]);
4564                         return -EINVAL;
4565                 }
4566                 break;
4567         }
4568
4569         amdgpu_dm_set_irq_funcs(adev);
4570
4571         if (adev->mode_info.funcs == NULL)
4572                 adev->mode_info.funcs = &dm_display_funcs;
4573
4574         /*
4575          * Note: Do NOT change adev->audio_endpt_rreg and
4576          * adev->audio_endpt_wreg because they are initialised in
4577          * amdgpu_device_init()
4578          */
4579 #if defined(CONFIG_DEBUG_KERNEL_DC)
4580         device_create_file(
4581                 adev_to_drm(adev)->dev,
4582                 &dev_attr_s3_debug);
4583 #endif
4584
4585         return 0;
4586 }
4587
4588 static bool modeset_required(struct drm_crtc_state *crtc_state,
4589                              struct dc_stream_state *new_stream,
4590                              struct dc_stream_state *old_stream)
4591 {
4592         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4593 }
4594
4595 static bool modereset_required(struct drm_crtc_state *crtc_state)
4596 {
4597         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4598 }
4599
4600 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4601 {
4602         drm_encoder_cleanup(encoder);
4603         kfree(encoder);
4604 }
4605
4606 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4607         .destroy = amdgpu_dm_encoder_destroy,
4608 };
4609
4610
4611 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4612                                          struct drm_framebuffer *fb,
4613                                          int *min_downscale, int *max_upscale)
4614 {
4615         struct amdgpu_device *adev = drm_to_adev(dev);
4616         struct dc *dc = adev->dm.dc;
4617         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4618         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4619
4620         switch (fb->format->format) {
4621         case DRM_FORMAT_P010:
4622         case DRM_FORMAT_NV12:
4623         case DRM_FORMAT_NV21:
4624                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4625                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4626                 break;
4627
4628         case DRM_FORMAT_XRGB16161616F:
4629         case DRM_FORMAT_ARGB16161616F:
4630         case DRM_FORMAT_XBGR16161616F:
4631         case DRM_FORMAT_ABGR16161616F:
4632                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4633                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4634                 break;
4635
4636         default:
4637                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4638                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4639                 break;
4640         }
4641
4642         /*
4643          * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4644          * scaling factor of 1.0 == 1000 units.
4645          */
4646         if (*max_upscale == 1)
4647                 *max_upscale = 1000;
4648
4649         if (*min_downscale == 1)
4650                 *min_downscale = 1000;
4651 }
4652
4653
4654 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4655                                 const struct drm_plane_state *state,
4656                                 struct dc_scaling_info *scaling_info)
4657 {
4658         int scale_w, scale_h, min_downscale, max_upscale;
4659
4660         memset(scaling_info, 0, sizeof(*scaling_info));
4661
4662         /* Source is fixed 16.16 but we ignore the fractional part for now... */
4663         scaling_info->src_rect.x = state->src_x >> 16;
4664         scaling_info->src_rect.y = state->src_y >> 16;
4665
4666         /*
4667          * For reasons we don't (yet) fully understand a non-zero
4668          * src_y coordinate into an NV12 buffer can cause a
4669          * system hang on DCN1x.
4670          * To avoid hangs (and maybe be overly cautious)
4671          * let's reject both non-zero src_x and src_y.
4672          *
4673          * We currently know of only one use-case to reproduce a
4674          * scenario with non-zero src_x and src_y for NV12, which
4675          * is to gesture the YouTube Android app into full screen
4676          * on ChromeOS.
4677          */
4678         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4679             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4680             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4681             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4682                 return -EINVAL;
4683
4684         scaling_info->src_rect.width = state->src_w >> 16;
4685         if (scaling_info->src_rect.width == 0)
4686                 return -EINVAL;
4687
4688         scaling_info->src_rect.height = state->src_h >> 16;
4689         if (scaling_info->src_rect.height == 0)
4690                 return -EINVAL;
4691
4692         scaling_info->dst_rect.x = state->crtc_x;
4693         scaling_info->dst_rect.y = state->crtc_y;
4694
4695         if (state->crtc_w == 0)
4696                 return -EINVAL;
4697
4698         scaling_info->dst_rect.width = state->crtc_w;
4699
4700         if (state->crtc_h == 0)
4701                 return -EINVAL;
4702
4703         scaling_info->dst_rect.height = state->crtc_h;
4704
4705         /* DRM doesn't specify clipping on destination output. */
4706         scaling_info->clip_rect = scaling_info->dst_rect;
4707
4708         /* Validate scaling per-format with DC plane caps */
4709         if (state->plane && state->plane->dev && state->fb) {
4710                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4711                                              &min_downscale, &max_upscale);
4712         } else {
4713                 min_downscale = 250;
4714                 max_upscale = 16000;
4715         }
4716
4717         scale_w = scaling_info->dst_rect.width * 1000 /
4718                   scaling_info->src_rect.width;
4719
4720         if (scale_w < min_downscale || scale_w > max_upscale)
4721                 return -EINVAL;
4722
4723         scale_h = scaling_info->dst_rect.height * 1000 /
4724                   scaling_info->src_rect.height;
4725
4726         if (scale_h < min_downscale || scale_h > max_upscale)
4727                 return -EINVAL;
4728
4729         /*
4730          * The "scaling_quality" can be ignored for now; a quality of 0 makes
4731          * DC assume reasonable defaults based on the format.
4732          */
4733
4734         return 0;
4735 }
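
/*
 * Worked example for the checks above (illustrative only): a 1920 pixel
 * wide source scanned out at 960 pixels gives
 *
 *   scale_w = 960 * 1000 / 1920 = 500
 *
 * i.e. a 0.5x downscale in 1/1000 units, which is accepted only when the
 * plane caps report a min_downscale of 500 or less.
 */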
4736
4737 static void
4738 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4739                                  uint64_t tiling_flags)
4740 {
4741         /* Fill GFX8 params */
4742         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4743                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4744
4745                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4746                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4747                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4748                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4749                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4750
4751                 /* XXX fix me for VI */
4752                 tiling_info->gfx8.num_banks = num_banks;
4753                 tiling_info->gfx8.array_mode =
4754                                 DC_ARRAY_2D_TILED_THIN1;
4755                 tiling_info->gfx8.tile_split = tile_split;
4756                 tiling_info->gfx8.bank_width = bankw;
4757                 tiling_info->gfx8.bank_height = bankh;
4758                 tiling_info->gfx8.tile_aspect = mtaspect;
4759                 tiling_info->gfx8.tile_mode =
4760                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4761         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4762                         == DC_ARRAY_1D_TILED_THIN1) {
4763                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4764         }
4765
4766         tiling_info->gfx8.pipe_config =
4767                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4768 }
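
/*
 * Note on AMDGPU_TILING_GET() above: it simply extracts a named bitfield
 * from the 64-bit tiling_flags word, roughly
 *
 *   (tiling_flags >> AMDGPU_TILING_<FIELD>_SHIFT) & AMDGPU_TILING_<FIELD>_MASK
 *
 * with the shift/mask pairs coming from the amdgpu UAPI header.
 */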
4769
4770 static void
4771 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4772                                   union dc_tiling_info *tiling_info)
4773 {
4774         tiling_info->gfx9.num_pipes =
4775                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4776         tiling_info->gfx9.num_banks =
4777                 adev->gfx.config.gb_addr_config_fields.num_banks;
4778         tiling_info->gfx9.pipe_interleave =
4779                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4780         tiling_info->gfx9.num_shader_engines =
4781                 adev->gfx.config.gb_addr_config_fields.num_se;
4782         tiling_info->gfx9.max_compressed_frags =
4783                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4784         tiling_info->gfx9.num_rb_per_se =
4785                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4786         tiling_info->gfx9.shaderEnable = 1;
4787         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4788                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4789 }
4790
4791 static int
4792 validate_dcc(struct amdgpu_device *adev,
4793              const enum surface_pixel_format format,
4794              const enum dc_rotation_angle rotation,
4795              const union dc_tiling_info *tiling_info,
4796              const struct dc_plane_dcc_param *dcc,
4797              const struct dc_plane_address *address,
4798              const struct plane_size *plane_size)
4799 {
4800         struct dc *dc = adev->dm.dc;
4801         struct dc_dcc_surface_param input;
4802         struct dc_surface_dcc_cap output;
4803
4804         memset(&input, 0, sizeof(input));
4805         memset(&output, 0, sizeof(output));
4806
4807         if (!dcc->enable)
4808                 return 0;
4809
4810         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4811             !dc->cap_funcs.get_dcc_compression_cap)
4812                 return -EINVAL;
4813
4814         input.format = format;
4815         input.surface_size.width = plane_size->surface_size.width;
4816         input.surface_size.height = plane_size->surface_size.height;
4817         input.swizzle_mode = tiling_info->gfx9.swizzle;
4818
4819         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4820                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4821         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4822                 input.scan = SCAN_DIRECTION_VERTICAL;
4823
4824         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4825                 return -EINVAL;
4826
4827         if (!output.capable)
4828                 return -EINVAL;
4829
4830         if (dcc->independent_64b_blks == 0 &&
4831             output.grph.rgb.independent_64b_blks != 0)
4832                 return -EINVAL;
4833
4834         return 0;
4835 }
4836
4837 static bool
4838 modifier_has_dcc(uint64_t modifier)
4839 {
4840         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4841 }
4842
4843 static unsigned
4844 modifier_gfx9_swizzle_mode(uint64_t modifier)
4845 {
4846         if (modifier == DRM_FORMAT_MOD_LINEAR)
4847                 return 0;
4848
4849         return AMD_FMT_MOD_GET(TILE, modifier);
4850 }
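
/*
 * Illustrative sketch (not compiled): an AMD format modifier is a packed
 * bitfield, and AMD_FMT_MOD_GET() extracts one field. A hypothetical
 * helper to dump the fields used in this file could look like:
 */
#if 0
static void example_dump_modifier(uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return;

	pr_info("tile=%llu tile_version=%llu dcc=%llu\n",
		AMD_FMT_MOD_GET(TILE, modifier),
		AMD_FMT_MOD_GET(TILE_VERSION, modifier),
		AMD_FMT_MOD_GET(DCC, modifier));
}
#endif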
4851
4852 static const struct drm_format_info *
4853 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4854 {
4855         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4856 }
4857
4858 static void
4859 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4860                                     union dc_tiling_info *tiling_info,
4861                                     uint64_t modifier)
4862 {
4863         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4864         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4865         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4866         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4867
4868         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4869
4870         if (!IS_AMD_FMT_MOD(modifier))
4871                 return;
4872
4873         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4874         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4875
4876         if (adev->family >= AMDGPU_FAMILY_NV) {
4877                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4878         } else {
4879                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4880
4881                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4882         }
4883 }
4884
4885 enum dm_micro_swizzle {
4886         MICRO_SWIZZLE_Z = 0,
4887         MICRO_SWIZZLE_S = 1,
4888         MICRO_SWIZZLE_D = 2,
4889         MICRO_SWIZZLE_R = 3
4890 };
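
/*
 * The GFX9+ swizzle mode encodes the micro tile layout in its two low
 * bits (Z = 0, S = 1, D = 2, R = 3), which is why
 * dm_plane_format_mod_supported() below masks the swizzle mode with 3 to
 * recover one of the values above.
 */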
4891
4892 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4893                                           uint32_t format,
4894                                           uint64_t modifier)
4895 {
4896         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4897         const struct drm_format_info *info = drm_format_info(format);
4898         int i;
4899
4900         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4901
4902         if (!info)
4903                 return false;
4904
4905         /*
4906          * We always have to allow these modifiers:
4907          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4908          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4909          */
4910         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4911             modifier == DRM_FORMAT_MOD_INVALID) {
4912                 return true;
4913         }
4914
4915         /* Check that the modifier is on the list of the plane's supported modifiers. */
4916         for (i = 0; i < plane->modifier_count; i++) {
4917                 if (modifier == plane->modifiers[i])
4918                         break;
4919         }
4920         if (i == plane->modifier_count)
4921                 return false;
4922
4923         /*
4924          * For D swizzle the canonical modifier depends on the bpp, so check
4925          * it here.
4926          */
4927         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4928             adev->family >= AMDGPU_FAMILY_NV) {
4929                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4930                         return false;
4931         }
4932
4933         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4934             info->cpp[0] < 8)
4935                 return false;
4936
4937         if (modifier_has_dcc(modifier)) {
4938                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4939                 if (info->cpp[0] != 4)
4940                         return false;
4941                 /* We support multi-planar formats, but not when combined with
4942                  * additional DCC metadata planes. */
4943                 if (info->num_planes > 1)
4944                         return false;
4945         }
4946
4947         return true;
4948 }
4949
4950 static void
4951 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4952 {
4953         if (!*mods)
4954                 return;
4955
4956         if (*cap - *size < 1) {
4957                 uint64_t new_cap = *cap * 2;
4958                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4959
4960                 if (!new_mods) {
4961                         kfree(*mods);
4962                         *mods = NULL;
4963                         return;
4964                 }
4965
4966                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4967                 kfree(*mods);
4968                 *mods = new_mods;
4969                 *cap = new_cap;
4970         }
4971
4972         (*mods)[*size] = mod;
4973         *size += 1;
4974 }
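
/*
 * Note the allocation-failure convention above: on OOM the existing list
 * is freed and *mods becomes NULL, so every subsequent add_modifier()
 * call is a no-op and get_plane_modifiers() below reports -ENOMEM once,
 * at the end.
 */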
4975
4976 static void
4977 add_gfx9_modifiers(const struct amdgpu_device *adev,
4978                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4979 {
4980         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4981         int pipe_xor_bits = min(8, pipes +
4982                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4983         int bank_xor_bits = min(8 - pipe_xor_bits,
4984                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4985         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4986                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4987
4989         if (adev->family == AMDGPU_FAMILY_RV) {
4990                 /* Raven2 and later */
4991                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4992
4993                 /*
4994                  * No _D DCC swizzles yet because we only allow 32bpp, which
4995                  * doesn't support _D on DCN
4996                  */
4997
4998                 if (has_constant_encode) {
4999                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5000                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5001                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5002                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5003                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5004                                     AMD_FMT_MOD_SET(DCC, 1) |
5005                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5006                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5007                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5008                 }
5009
5010                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5012                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5013                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5014                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5015                             AMD_FMT_MOD_SET(DCC, 1) |
5016                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5017                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5018                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5019
5020                 if (has_constant_encode) {
5021                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5022                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5023                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5024                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5025                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5026                                     AMD_FMT_MOD_SET(DCC, 1) |
5027                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5028                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5029                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5031                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5032                                     AMD_FMT_MOD_SET(RB, rb) |
5033                                     AMD_FMT_MOD_SET(PIPE, pipes));
5034                 }
5035
5036                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5037                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5038                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5039                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5040                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5041                             AMD_FMT_MOD_SET(DCC, 1) |
5042                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5043                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5044                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5045                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5046                             AMD_FMT_MOD_SET(RB, rb) |
5047                             AMD_FMT_MOD_SET(PIPE, pipes));
5048         }
5049
5050         /*
5051          * Only supported for 64bpp on Raven, will be filtered on format in
5052          * dm_plane_format_mod_supported.
5053          */
5054         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5055                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5056                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5057                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5058                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5059
5060         if (adev->family == AMDGPU_FAMILY_RV) {
5061                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5062                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5063                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5064                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5065                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5066         }
5067
5068         /*
5069          * Only supported for 64bpp on Raven, will be filtered on format in
5070          * dm_plane_format_mod_supported.
5071          */
5072         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5073                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5074                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5075
5076         if (adev->family == AMDGPU_FAMILY_RV) {
5077                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5078                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5079                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5080         }
5081 }
5082
5083 static void
5084 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5085                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5086 {
5087         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5088
5089         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5090                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5091                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5092                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5093                     AMD_FMT_MOD_SET(DCC, 1) |
5094                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5095                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5096                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5097
5098         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5099                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5100                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5101                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5102                     AMD_FMT_MOD_SET(DCC, 1) |
5103                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5104                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5105                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5106                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5107
5108         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5110                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5111                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5112
5113         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5114                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5115                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5116                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5117
5119         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5120         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5121                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5122                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5123
5124         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5125                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5126                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5127 }
5128
5129 static void
5130 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5131                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5132 {
5133         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5134         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5135
5136         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5137                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5138                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5139                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5140                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5141                     AMD_FMT_MOD_SET(DCC, 1) |
5142                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5143                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5144                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5145                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5146
5147         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5149                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5150                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5151                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5152                     AMD_FMT_MOD_SET(DCC, 1) |
5153                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5154                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5155                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5156
5157         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5158                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5159                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5160                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5161                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5162                     AMD_FMT_MOD_SET(DCC, 1) |
5163                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5164                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5165                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5166                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5167                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5168
5169         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5170                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5171                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5172                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5173                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5174                     AMD_FMT_MOD_SET(DCC, 1) |
5175                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5176                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5177                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5178                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5179
5180         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5181                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5182                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5183                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5184                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5185
5186         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5187                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5188                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5189                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5190                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5191
5192         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5193         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5194                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5195                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5196
5197         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5198                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5199                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5200 }
5201
5202 static int
5203 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5204 {
5205         uint64_t size = 0, capacity = 128;
5206         *mods = NULL;
5207
5208         /* We have not hooked up any pre-GFX9 modifiers. */
5209         if (adev->family < AMDGPU_FAMILY_AI)
5210                 return 0;
5211
5212         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5213
5214         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5215                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5216                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5217                 return *mods ? 0 : -ENOMEM;
5218         }
5219
5220         switch (adev->family) {
5221         case AMDGPU_FAMILY_AI:
5222         case AMDGPU_FAMILY_RV:
5223                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5224                 break;
5225         case AMDGPU_FAMILY_NV:
5226         case AMDGPU_FAMILY_VGH:
5227         case AMDGPU_FAMILY_YC:
5228         case AMDGPU_FAMILY_GC_10_3_6:
5229         case AMDGPU_FAMILY_GC_10_3_7:
5230                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5231                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5232                 else
5233                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5234                 break;
5235         }
5236
5237         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5238
5239         /* INVALID marks the end of the list. */
5240         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5241
5242         if (!*mods)
5243                 return -ENOMEM;
5244
5245         return 0;
5246 }
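
/*
 * Illustrative sketch (not compiled): the INVALID-terminated list built
 * above is ultimately handed to DRM when the plane is created; formats,
 * num_formats and possible_crtcs here are placeholders for the plane's
 * real format list and CRTC mask:
 */
#if 0
	uint64_t *mods;
	int res;

	if (get_plane_modifiers(adev, plane->type, &mods))
		return -ENOMEM;

	/* DRM copies the modifier array, so it can be freed right after. */
	res = drm_universal_plane_init(adev_to_drm(adev), plane,
				       possible_crtcs, &dm_plane_funcs,
				       formats, num_formats, mods,
				       plane->type, NULL);
	kfree(mods);

	return res;
#endif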
5247
5248 static int
5249 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5250                                           const struct amdgpu_framebuffer *afb,
5251                                           const enum surface_pixel_format format,
5252                                           const enum dc_rotation_angle rotation,
5253                                           const struct plane_size *plane_size,
5254                                           union dc_tiling_info *tiling_info,
5255                                           struct dc_plane_dcc_param *dcc,
5256                                           struct dc_plane_address *address,
5257                                           const bool force_disable_dcc)
5258 {
5259         const uint64_t modifier = afb->base.modifier;
5260         int ret = 0;
5261
5262         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5263         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5264
5265         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5266                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5267                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5268                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5269
5270                 dcc->enable = 1;
5271                 dcc->meta_pitch = afb->base.pitches[1];
5272                 dcc->independent_64b_blks = independent_64b_blks;
5273                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5274                         if (independent_64b_blks && independent_128b_blks)
5275                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5276                         else if (independent_128b_blks)
5277                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5278                         else if (independent_64b_blks && !independent_128b_blks)
5279                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5280                         else
5281                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5282                 } else {
5283                         if (independent_64b_blks)
5284                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5285                         else
5286                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5287                 }
5288
5289                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5290                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5291         }
5292
5293         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5294         if (ret)
5295                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5296
5297         return ret;
5298 }
5299
5300 static int
5301 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5302                              const struct amdgpu_framebuffer *afb,
5303                              const enum surface_pixel_format format,
5304                              const enum dc_rotation_angle rotation,
5305                              const uint64_t tiling_flags,
5306                              union dc_tiling_info *tiling_info,
5307                              struct plane_size *plane_size,
5308                              struct dc_plane_dcc_param *dcc,
5309                              struct dc_plane_address *address,
5310                              bool tmz_surface,
5311                              bool force_disable_dcc)
5312 {
5313         const struct drm_framebuffer *fb = &afb->base;
5314         int ret;
5315
5316         memset(tiling_info, 0, sizeof(*tiling_info));
5317         memset(plane_size, 0, sizeof(*plane_size));
5318         memset(dcc, 0, sizeof(*dcc));
5319         memset(address, 0, sizeof(*address));
5320
5321         address->tmz_surface = tmz_surface;
5322
5323         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5324                 uint64_t addr = afb->address + fb->offsets[0];
5325
5326                 plane_size->surface_size.x = 0;
5327                 plane_size->surface_size.y = 0;
5328                 plane_size->surface_size.width = fb->width;
5329                 plane_size->surface_size.height = fb->height;
5330                 plane_size->surface_pitch =
5331                         fb->pitches[0] / fb->format->cpp[0];
5332
5333                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5334                 address->grph.addr.low_part = lower_32_bits(addr);
5335                 address->grph.addr.high_part = upper_32_bits(addr);
5336         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5337                 uint64_t luma_addr = afb->address + fb->offsets[0];
5338                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5339
5340                 plane_size->surface_size.x = 0;
5341                 plane_size->surface_size.y = 0;
5342                 plane_size->surface_size.width = fb->width;
5343                 plane_size->surface_size.height = fb->height;
5344                 plane_size->surface_pitch =
5345                         fb->pitches[0] / fb->format->cpp[0];
5346
5347                 plane_size->chroma_size.x = 0;
5348                 plane_size->chroma_size.y = 0;
5349                 /* TODO: set these based on surface format */
5350                 plane_size->chroma_size.width = fb->width / 2;
5351                 plane_size->chroma_size.height = fb->height / 2;
5352
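                     /*
                      * Plane 1 of a semi-planar 4:2:0 format holds interleaved
                      * CbCr, so cpp[1] (e.g. 2 bytes for NV12) spans one
                      * Cb+Cr sample pair.
                      */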
5353                 plane_size->chroma_pitch =
5354                         fb->pitches[1] / fb->format->cpp[1];
5355
5356                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5357                 address->video_progressive.luma_addr.low_part =
5358                         lower_32_bits(luma_addr);
5359                 address->video_progressive.luma_addr.high_part =
5360                         upper_32_bits(luma_addr);
5361                 address->video_progressive.chroma_addr.low_part =
5362                         lower_32_bits(chroma_addr);
5363                 address->video_progressive.chroma_addr.high_part =
5364                         upper_32_bits(chroma_addr);
5365         }
5366
5367         if (adev->family >= AMDGPU_FAMILY_AI) {
5368                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5369                                                                 rotation, plane_size,
5370                                                                 tiling_info, dcc,
5371                                                                 address,
5372                                                                 force_disable_dcc);
5373                 if (ret)
5374                         return ret;
5375         } else {
5376                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5377         }
5378
5379         return 0;
5380 }
5381
5382 static void
5383 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5384                                bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5385                                bool *global_alpha, int *global_alpha_value)
5386 {
5387         *per_pixel_alpha = false;
5388         *pre_multiplied_alpha = true;
5389         *global_alpha = false;
5390         *global_alpha_value = 0xff;
5391
5392         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5393                 return;
5394
5395         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5396                 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5397                 static const uint32_t alpha_formats[] = {
5398                         DRM_FORMAT_ARGB8888,
5399                         DRM_FORMAT_RGBA8888,
5400                         DRM_FORMAT_ABGR8888,
5401                 };
5402                 uint32_t format = plane_state->fb->format->format;
5403                 unsigned int i;
5404
5405                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5406                         if (format == alpha_formats[i]) {
5407                                 *per_pixel_alpha = true;
5408                                 break;
5409                         }
5410                 }
5411
5412                 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5413                         *pre_multiplied_alpha = false;
5414         }
5415
5416         if (plane_state->alpha < 0xffff) {
5417                 *global_alpha = true;
5418                 *global_alpha_value = plane_state->alpha >> 8;
5419         }
5420 }
5421
5422 static int
5423 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5424                             const enum surface_pixel_format format,
5425                             enum dc_color_space *color_space)
5426 {
5427         bool full_range;
5428
5429         *color_space = COLOR_SPACE_SRGB;
5430
5431         /* DRM color properties only affect non-RGB formats. */
5432         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5433                 return 0;
5434
5435         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5436
5437         switch (plane_state->color_encoding) {
5438         case DRM_COLOR_YCBCR_BT601:
5439                 if (full_range)
5440                         *color_space = COLOR_SPACE_YCBCR601;
5441                 else
5442                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5443                 break;
5444
5445         case DRM_COLOR_YCBCR_BT709:
5446                 if (full_range)
5447                         *color_space = COLOR_SPACE_YCBCR709;
5448                 else
5449                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5450                 break;
5451
5452         case DRM_COLOR_YCBCR_BT2020:
5453                 if (full_range)
5454                         *color_space = COLOR_SPACE_2020_YCBCR;
5455                 else
5456                         return -EINVAL;
5457                 break;
5458
5459         default:
5460                 return -EINVAL;
5461         }
5462
5463         return 0;
5464 }
5465
5466 static int
5467 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5468                             const struct drm_plane_state *plane_state,
5469                             const uint64_t tiling_flags,
5470                             struct dc_plane_info *plane_info,
5471                             struct dc_plane_address *address,
5472                             bool tmz_surface,
5473                             bool force_disable_dcc)
5474 {
5475         const struct drm_framebuffer *fb = plane_state->fb;
5476         const struct amdgpu_framebuffer *afb =
5477                 to_amdgpu_framebuffer(plane_state->fb);
5478         int ret;
5479
5480         memset(plane_info, 0, sizeof(*plane_info));
5481
5482         switch (fb->format->format) {
5483         case DRM_FORMAT_C8:
5484                 plane_info->format =
5485                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5486                 break;
5487         case DRM_FORMAT_RGB565:
5488                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5489                 break;
5490         case DRM_FORMAT_XRGB8888:
5491         case DRM_FORMAT_ARGB8888:
5492                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5493                 break;
5494         case DRM_FORMAT_XRGB2101010:
5495         case DRM_FORMAT_ARGB2101010:
5496                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5497                 break;
5498         case DRM_FORMAT_XBGR2101010:
5499         case DRM_FORMAT_ABGR2101010:
5500                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5501                 break;
5502         case DRM_FORMAT_XBGR8888:
5503         case DRM_FORMAT_ABGR8888:
5504                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5505                 break;
5506         case DRM_FORMAT_NV21:
5507                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5508                 break;
5509         case DRM_FORMAT_NV12:
5510                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5511                 break;
5512         case DRM_FORMAT_P010:
5513                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5514                 break;
5515         case DRM_FORMAT_XRGB16161616F:
5516         case DRM_FORMAT_ARGB16161616F:
5517                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5518                 break;
5519         case DRM_FORMAT_XBGR16161616F:
5520         case DRM_FORMAT_ABGR16161616F:
5521                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5522                 break;
5523         case DRM_FORMAT_XRGB16161616:
5524         case DRM_FORMAT_ARGB16161616:
5525                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5526                 break;
5527         case DRM_FORMAT_XBGR16161616:
5528         case DRM_FORMAT_ABGR16161616:
5529                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5530                 break;
5531         default:
5532                 DRM_ERROR(
5533                         "Unsupported screen format %p4cc\n",
5534                         &fb->format->format);
5535                 return -EINVAL;
5536         }
5537
5538         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5539         case DRM_MODE_ROTATE_0:
5540                 plane_info->rotation = ROTATION_ANGLE_0;
5541                 break;
5542         case DRM_MODE_ROTATE_90:
5543                 plane_info->rotation = ROTATION_ANGLE_90;
5544                 break;
5545         case DRM_MODE_ROTATE_180:
5546                 plane_info->rotation = ROTATION_ANGLE_180;
5547                 break;
5548         case DRM_MODE_ROTATE_270:
5549                 plane_info->rotation = ROTATION_ANGLE_270;
5550                 break;
5551         default:
5552                 plane_info->rotation = ROTATION_ANGLE_0;
5553                 break;
5554         }
5555
5556         plane_info->visible = true;
5557         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5558
5559         plane_info->layer_index = 0;
5560
5561         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5562                                           &plane_info->color_space);
5563         if (ret)
5564                 return ret;
5565
5566         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5567                                            plane_info->rotation, tiling_flags,
5568                                            &plane_info->tiling_info,
5569                                            &plane_info->plane_size,
5570                                            &plane_info->dcc, address, tmz_surface,
5571                                            force_disable_dcc);
5572         if (ret)
5573                 return ret;
5574
5575         fill_blending_from_plane_state(
5576                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5577                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5578
5579         return 0;
5580 }
5581
5582 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5583                                     struct dc_plane_state *dc_plane_state,
5584                                     struct drm_plane_state *plane_state,
5585                                     struct drm_crtc_state *crtc_state)
5586 {
5587         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5588         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5589         struct dc_scaling_info scaling_info;
5590         struct dc_plane_info plane_info;
5591         int ret;
5592         bool force_disable_dcc = false;
5593
5594         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5595         if (ret)
5596                 return ret;
5597
5598         dc_plane_state->src_rect = scaling_info.src_rect;
5599         dc_plane_state->dst_rect = scaling_info.dst_rect;
5600         dc_plane_state->clip_rect = scaling_info.clip_rect;
5601         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5602
5603         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5604         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5605                                           afb->tiling_flags,
5606                                           &plane_info,
5607                                           &dc_plane_state->address,
5608                                           afb->tmz_surface,
5609                                           force_disable_dcc);
5610         if (ret)
5611                 return ret;
5612
5613         dc_plane_state->format = plane_info.format;
5614         dc_plane_state->color_space = plane_info.color_space;
5616         dc_plane_state->plane_size = plane_info.plane_size;
5617         dc_plane_state->rotation = plane_info.rotation;
5618         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5619         dc_plane_state->stereo_format = plane_info.stereo_format;
5620         dc_plane_state->tiling_info = plane_info.tiling_info;
5621         dc_plane_state->visible = plane_info.visible;
5622         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5623         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5624         dc_plane_state->global_alpha = plane_info.global_alpha;
5625         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5626         dc_plane_state->dcc = plane_info.dcc;
5627         dc_plane_state->layer_index = plane_info.layer_index; /* always returns 0 */
5628         dc_plane_state->flip_int_enabled = true;
5629
5630         /*
5631          * Always set input transfer function, since plane state is refreshed
5632          * every time.
5633          */
5634         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5635         if (ret)
5636                 return ret;
5637
5638         return 0;
5639 }
5640
5641 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5642                                            const struct dm_connector_state *dm_state,
5643                                            struct dc_stream_state *stream)
5644 {
5645         enum amdgpu_rmx_type rmx_type;
5646
5647         struct rect src = { 0 }; /* viewport in composition space*/
5648         struct rect dst = { 0 }; /* stream addressable area */
5649
5650         /* no mode. nothing to be done */
5651         if (!mode)
5652                 return;
5653
5654         /* Full screen scaling by default */
5655         src.width = mode->hdisplay;
5656         src.height = mode->vdisplay;
5657         dst.width = stream->timing.h_addressable;
5658         dst.height = stream->timing.v_addressable;
5659
5660         if (dm_state) {
5661                 rmx_type = dm_state->scaling;
5662                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
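                             /*
                              * Compare aspect ratios by cross-multiplying to
                              * avoid division: src.w/src.h < dst.w/dst.h iff
                              * src.w * dst.h < src.h * dst.w. e.g. fitting a
                              * 1920x1080 source into a 1920x1200 stream keeps
                              * the width and centers 1920x1080 vertically.
                              */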
5663                         if (src.width * dst.height <
5664                                         src.height * dst.width) {
5665                                 /* height needs less upscaling/more downscaling */
5666                                 dst.width = src.width *
5667                                                 dst.height / src.height;
5668                         } else {
5669                                 /* width needs less upscaling/more downscaling */
5670                                 dst.height = src.height *
5671                                                 dst.width / src.width;
5672                         }
5673                 } else if (rmx_type == RMX_CENTER) {
5674                         dst = src;
5675                 }
5676
5677                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5678                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5679
5680                 if (dm_state->underscan_enable) {
5681                         dst.x += dm_state->underscan_hborder / 2;
5682                         dst.y += dm_state->underscan_vborder / 2;
5683                         dst.width -= dm_state->underscan_hborder;
5684                         dst.height -= dm_state->underscan_vborder;
5685                 }
5686         }
5687
5688         stream->src = src;
5689         stream->dst = dst;
5690
5691         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5692                       dst.x, dst.y, dst.width, dst.height);
5694 }
5695
5696 static enum dc_color_depth
5697 convert_color_depth_from_display_info(const struct drm_connector *connector,
5698                                       bool is_y420, int requested_bpc)
5699 {
5700         uint8_t bpc;
5701
5702         if (is_y420) {
5703                 bpc = 8;
5704
5705                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5706                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5707                         bpc = 16;
5708                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5709                         bpc = 12;
5710                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5711                         bpc = 10;
5712         } else {
5713                 bpc = (uint8_t)connector->display_info.bpc;
5714                 /* Assume 8 bpc by default if no bpc is specified. */
5715                 bpc = bpc ? bpc : 8;
5716         }
5717
5718         if (requested_bpc > 0) {
5719                 /*
5720                  * Cap display bpc based on the user requested value.
5721                  *
5722                  * The value for state->max_bpc may not be correctly updated
5723                  * depending on when the connector gets added to the state
5724                  * or if this was called outside of atomic check, so it
5725                  * can't be used directly.
5726                  */
5727                 bpc = min_t(u8, bpc, requested_bpc);
5728
5729                 /* Round down to the nearest even number. */
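                     /* e.g. a 10 bpc panel capped at max_bpc=9: min(10, 9) = 9, rounded down to 8. */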
5730                 bpc = bpc - (bpc & 1);
5731         }
5732
5733         switch (bpc) {
5734         case 0:
5735                 /*
5736                  * Temporary workaround: DRM doesn't parse color depth for
5737                  * EDID revisions before 1.4.
5738                  * TODO: Fix edid parsing
5739                  */
5740                 return COLOR_DEPTH_888;
5741         case 6:
5742                 return COLOR_DEPTH_666;
5743         case 8:
5744                 return COLOR_DEPTH_888;
5745         case 10:
5746                 return COLOR_DEPTH_101010;
5747         case 12:
5748                 return COLOR_DEPTH_121212;
5749         case 14:
5750                 return COLOR_DEPTH_141414;
5751         case 16:
5752                 return COLOR_DEPTH_161616;
5753         default:
5754                 return COLOR_DEPTH_UNDEFINED;
5755         }
5756 }
5757
5758 static enum dc_aspect_ratio
5759 get_aspect_ratio(const struct drm_display_mode *mode_in)
5760 {
5761         /* 1-1 mapping, since both enums follow the HDMI spec. */
5762         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5763 }
5764
5765 static enum dc_color_space
5766 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5767 {
5768         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5769
5770         switch (dc_crtc_timing->pixel_encoding) {
5771         case PIXEL_ENCODING_YCBCR422:
5772         case PIXEL_ENCODING_YCBCR444:
5773         case PIXEL_ENCODING_YCBCR420:
5774         {
5775                 /*
5776                  * 27030 kHz is the separation point between HDTV and SDTV
5777                  * per the HDMI spec: use YCbCr709 above it and YCbCr601
5778                  * below it.
5779                  */
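                     /* e.g. 480p at 27.000 MHz selects 601; 720p at 74.25 MHz selects 709. */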
5780                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5781                         if (dc_crtc_timing->flags.Y_ONLY)
5782                                 color_space =
5783                                         COLOR_SPACE_YCBCR709_LIMITED;
5784                         else
5785                                 color_space = COLOR_SPACE_YCBCR709;
5786                 } else {
5787                         if (dc_crtc_timing->flags.Y_ONLY)
5788                                 color_space =
5789                                         COLOR_SPACE_YCBCR601_LIMITED;
5790                         else
5791                                 color_space = COLOR_SPACE_YCBCR601;
5792                 }
5793
5794         }
5795         break;
5796         case PIXEL_ENCODING_RGB:
5797                 color_space = COLOR_SPACE_SRGB;
5798                 break;
5799
5800         default:
5801                 WARN_ON(1);
5802                 break;
5803         }
5804
5805         return color_space;
5806 }
5807
5808 static bool adjust_colour_depth_from_display_info(
5809         struct dc_crtc_timing *timing_out,
5810         const struct drm_display_info *info)
5811 {
5812         enum dc_color_depth depth = timing_out->display_color_depth;
5813         int normalized_clk;
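             /*
              * e.g. 4K@60 4:4:4 (594000 kHz) at 12 bpc needs 594000 * 36 / 24 =
              * 891000 kHz of TMDS bandwidth; on a 600000 kHz sink the loop steps
              * down through 10 bpc (742500 kHz) and settles on 8 bpc (594000 kHz).
              */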
5814         do {
5815                 normalized_clk = timing_out->pix_clk_100hz / 10;
5816                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5817                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5818                         normalized_clk /= 2;
5819                 /* Adjust the pixel clock per the HDMI spec for the given colour depth. */
5820                 switch (depth) {
5821                 case COLOR_DEPTH_888:
5822                         break;
5823                 case COLOR_DEPTH_101010:
5824                         normalized_clk = (normalized_clk * 30) / 24;
5825                         break;
5826                 case COLOR_DEPTH_121212:
5827                         normalized_clk = (normalized_clk * 36) / 24;
5828                         break;
5829                 case COLOR_DEPTH_161616:
5830                         normalized_clk = (normalized_clk * 48) / 24;
5831                         break;
5832                 default:
5833                         /* The above depths are the only ones valid for HDMI. */
5834                         return false;
5835                 }
5836                 if (normalized_clk <= info->max_tmds_clock) {
5837                         timing_out->display_color_depth = depth;
5838                         return true;
5839                 }
5840         } while (--depth > COLOR_DEPTH_666);
5841         return false;
5842 }
5843
5844 static void fill_stream_properties_from_drm_display_mode(
5845         struct dc_stream_state *stream,
5846         const struct drm_display_mode *mode_in,
5847         const struct drm_connector *connector,
5848         const struct drm_connector_state *connector_state,
5849         const struct dc_stream_state *old_stream,
5850         int requested_bpc)
5851 {
5852         struct dc_crtc_timing *timing_out = &stream->timing;
5853         const struct drm_display_info *info = &connector->display_info;
5854         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5855         struct hdmi_vendor_infoframe hv_frame;
5856         struct hdmi_avi_infoframe avi_frame;
5857
5858         memset(&hv_frame, 0, sizeof(hv_frame));
5859         memset(&avi_frame, 0, sizeof(avi_frame));
5860
5861         timing_out->h_border_left = 0;
5862         timing_out->h_border_right = 0;
5863         timing_out->v_border_top = 0;
5864         timing_out->v_border_bottom = 0;
5865         /* TODO: un-hardcode */
5866         if (drm_mode_is_420_only(info, mode_in)
5867                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5868                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5869         else if (drm_mode_is_420_also(info, mode_in)
5870                         && aconnector->force_yuv420_output)
5871                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5872         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5873                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5874                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5875         else
5876                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5877
5878         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5879         timing_out->display_color_depth = convert_color_depth_from_display_info(
5880                 connector,
5881                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5882                 requested_bpc);
5883         timing_out->scan_type = SCANNING_TYPE_NODATA;
5884         timing_out->hdmi_vic = 0;
5885
5886         if (old_stream) {
5887                 timing_out->vic = old_stream->timing.vic;
5888                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5889                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5890         } else {
5891                 timing_out->vic = drm_match_cea_mode(mode_in);
5892                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5893                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5894                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5895                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5896         }
5897
5898         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5899                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5900                 timing_out->vic = avi_frame.video_code;
5901                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5902                 timing_out->hdmi_vic = hv_frame.vic;
5903         }
5904
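             /*
              * For FreeSync video modes, program the raw mode timings: the
              * crtc_-prefixed copies may still reflect the originally requested
              * mode rather than the substituted FreeSync base mode.
              */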
5905         if (is_freesync_video_mode(mode_in, aconnector)) {
5906                 timing_out->h_addressable = mode_in->hdisplay;
5907                 timing_out->h_total = mode_in->htotal;
5908                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5909                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5910                 timing_out->v_total = mode_in->vtotal;
5911                 timing_out->v_addressable = mode_in->vdisplay;
5912                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5913                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5914                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5915         } else {
5916                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5917                 timing_out->h_total = mode_in->crtc_htotal;
5918                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5919                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5920                 timing_out->v_total = mode_in->crtc_vtotal;
5921                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5922                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5923                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5924                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5925         }
5926
5927         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5928
5929         stream->output_color_space = get_output_color_space(timing_out);
5930
5931         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5932         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5933         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5934                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5935                     drm_mode_is_420_also(info, mode_in) &&
5936                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5937                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5938                         adjust_colour_depth_from_display_info(timing_out, info);
5939                 }
5940         }
5941 }
5942
5943 static void fill_audio_info(struct audio_info *audio_info,
5944                             const struct drm_connector *drm_connector,
5945                             const struct dc_sink *dc_sink)
5946 {
5947         int i = 0;
5948         int cea_revision = 0;
5949         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5950
5951         audio_info->manufacture_id = edid_caps->manufacturer_id;
5952         audio_info->product_id = edid_caps->product_id;
5953
5954         cea_revision = drm_connector->display_info.cea_rev;
5955
5956         strscpy(audio_info->display_name,
5957                 edid_caps->display_name,
5958                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5959
5960         if (cea_revision >= 3) {
5961                 audio_info->mode_count = edid_caps->audio_mode_count;
5962
5963                 for (i = 0; i < audio_info->mode_count; ++i) {
5964                         audio_info->modes[i].format_code =
5965                                         (enum audio_format_code)
5966                                         (edid_caps->audio_modes[i].format_code);
5967                         audio_info->modes[i].channel_count =
5968                                         edid_caps->audio_modes[i].channel_count;
5969                         audio_info->modes[i].sample_rates.all =
5970                                         edid_caps->audio_modes[i].sample_rate;
5971                         audio_info->modes[i].sample_size =
5972                                         edid_caps->audio_modes[i].sample_size;
5973                 }
5974         }
5975
5976         audio_info->flags.all = edid_caps->speaker_flags;
5977
5978         /* TODO: We only check progressive mode; check interlaced mode too. */
5979         if (drm_connector->latency_present[0]) {
5980                 audio_info->video_latency = drm_connector->video_latency[0];
5981                 audio_info->audio_latency = drm_connector->audio_latency[0];
5982         }
5983
5984         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5986 }
5987
5988 static void
5989 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5990                                       struct drm_display_mode *dst_mode)
5991 {
5992         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5993         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5994         dst_mode->crtc_clock = src_mode->crtc_clock;
5995         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5996         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5997         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5998         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5999         dst_mode->crtc_htotal = src_mode->crtc_htotal;
6000         dst_mode->crtc_hskew = src_mode->crtc_hskew;
6001         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
6002         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6003         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6004         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6005         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6006 }
6007
6008 static void
6009 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6010                                         const struct drm_display_mode *native_mode,
6011                                         bool scale_enabled)
6012 {
6013         if (scale_enabled) {
6014                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6015         } else if (native_mode->clock == drm_mode->clock &&
6016                         native_mode->htotal == drm_mode->htotal &&
6017                         native_mode->vtotal == drm_mode->vtotal) {
6018                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6019         } else {
6020                 /* Neither scaling nor an amdgpu-inserted mode: nothing to patch. */
6021         }
6022 }
6023
6024 static struct dc_sink *
6025 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6026 {
6027         struct dc_sink_init_data sink_init_data = { 0 };
6028         struct dc_sink *sink = NULL;
6029         sink_init_data.link = aconnector->dc_link;
6030         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6031
6032         sink = dc_sink_create(&sink_init_data);
6033         if (!sink) {
6034                 DRM_ERROR("Failed to create sink!\n");
6035                 return NULL;
6036         }
6037         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6038
6039         return sink;
6040 }
6041
6042 static void set_multisync_trigger_params(
6043                 struct dc_stream_state *stream)
6044 {
6045         struct dc_stream_state *master = NULL;
6046
6047         if (stream->triggered_crtc_reset.enabled) {
6048                 master = stream->triggered_crtc_reset.event_source;
6049                 stream->triggered_crtc_reset.event =
6050                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6051                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6052                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6053         }
6054 }
6055
6056 static void set_master_stream(struct dc_stream_state *stream_set[],
6057                               int stream_count)
6058 {
6059         int j, highest_rfr = 0, master_stream = 0;
6060
6061         for (j = 0; j < stream_count; j++) {
6062                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6063                         int refresh_rate = 0;
6064
6065                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6066                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
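                             /*
                              * pix_clk_100hz is in 100 Hz units, so multiply
                              * by 100 for Hz: e.g. 1080p60 gives
                              * 1485000 * 100 / (2200 * 1125) = 60.
                              */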
6067                         if (refresh_rate > highest_rfr) {
6068                                 highest_rfr = refresh_rate;
6069                                 master_stream = j;
6070                         }
6071                 }
6072         }
6073         for (j = 0; j < stream_count; j++) {
6074                 if (stream_set[j])
6075                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6076         }
6077 }
6078
6079 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6080 {
6081         int i = 0;
6082         struct dc_stream_state *stream;
6083
6084         if (context->stream_count < 2)
6085                 return;
6086         for (i = 0; i < context->stream_count; i++) {
6087                 if (!context->streams[i])
6088                         continue;
6089                 /*
6090                  * TODO: add a function to read AMD VSDB bits and set
6091                  * crtc_sync_master.multi_sync_enabled flag
6092                  * For now it's set to false
6093                  */
6094         }
6095
6096         set_master_stream(context->streams, context->stream_count);
6097
6098         for (i = 0; i < context->stream_count; i++) {
6099                 stream = context->streams[i];
6100
6101                 if (!stream)
6102                         continue;
6103
6104                 set_multisync_trigger_params(stream);
6105         }
6106 }
6107
6108 #if defined(CONFIG_DRM_AMD_DC_DCN)
6109 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6110                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6111                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6112 {
6113         stream->timing.flags.DSC = 0;
6114         dsc_caps->is_dsc_supported = false;
6115
6116         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6117                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6118                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6119                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6120                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6121                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6122                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6123                                 dsc_caps);
6124         }
6125 }
6126
6127 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6128                                     struct dc_sink *sink, struct dc_stream_state *stream,
6129                                     struct dsc_dec_dpcd_caps *dsc_caps,
6130                                     uint32_t max_dsc_target_bpp_limit_override)
6131 {
6132         const struct dc_link_settings *verified_link_cap = NULL;
6133         uint32_t link_bw_in_kbps;
6134         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6135         struct dc *dc = sink->ctx->dc;
6136         struct dc_dsc_bw_range bw_range = {0};
6137         struct dc_dsc_config dsc_cfg = {0};
6138
6139         verified_link_cap = dc_link_get_link_cap(stream->link);
6140         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
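             /* DSC bpp parameters are in 1/16 bpp fixed point: 8 * 16 == 8.0 bpp. */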
6141         edp_min_bpp_x16 = 8 * 16;
6142         edp_max_bpp_x16 = 8 * 16;
6143
6144         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6145                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6146
6147         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6148                 edp_min_bpp_x16 = edp_max_bpp_x16;
6149
6150         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6151                                 dc->debug.dsc_min_slice_height_override,
6152                                 edp_min_bpp_x16, edp_max_bpp_x16,
6153                                 dsc_caps,
6154                                 &stream->timing,
6155                                 &bw_range)) {
6156
6157                 if (bw_range.max_kbps < link_bw_in_kbps) {
6158                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6159                                         dsc_caps,
6160                                         dc->debug.dsc_min_slice_height_override,
6161                                         max_dsc_target_bpp_limit_override,
6162                                         0,
6163                                         &stream->timing,
6164                                         &dsc_cfg)) {
6165                                 stream->timing.dsc_cfg = dsc_cfg;
6166                                 stream->timing.flags.DSC = 1;
6167                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6168                         }
6169                         return;
6170                 }
6171         }
6172
6173         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6174                                 dsc_caps,
6175                                 dc->debug.dsc_min_slice_height_override,
6176                                 max_dsc_target_bpp_limit_override,
6177                                 link_bw_in_kbps,
6178                                 &stream->timing,
6179                                 &dsc_cfg)) {
6180                 stream->timing.dsc_cfg = dsc_cfg;
6181                 stream->timing.flags.DSC = 1;
6182         }
6183 }
6184
6185 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6186                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6187                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6188 {
6189         struct drm_connector *drm_connector = &aconnector->base;
6190         uint32_t link_bandwidth_kbps;
6191         uint32_t max_dsc_target_bpp_limit_override = 0;
6192         struct dc *dc = sink->ctx->dc;
6193         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6194         uint32_t dsc_max_supported_bw_in_kbps;
6195
6196         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6197                                                         dc_link_get_link_cap(aconnector->dc_link));
6198
6199         if (stream->link && stream->link->local_sink)
6200                 max_dsc_target_bpp_limit_override =
6201                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6202
6203         /* Set DSC policy according to dsc_clock_en */
6204         dc_dsc_policy_set_enable_dsc_when_not_needed(
6205                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6206
6207         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6208             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6209
6210                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6211
6212         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6213                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6214                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6215                                                 dsc_caps,
6216                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6217                                                 max_dsc_target_bpp_limit_override,
6218                                                 link_bandwidth_kbps,
6219                                                 &stream->timing,
6220                                                 &stream->timing.dsc_cfg)) {
6221                                 stream->timing.flags.DSC = 1;
6222                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6223                                                                  __func__, drm_connector->name);
6224                         }
6225                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6226                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6227                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6228                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6229
6230                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6231                                         max_supported_bw_in_kbps > 0 &&
6232                                         dsc_max_supported_bw_in_kbps > 0)
6233                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6234                                                 dsc_caps,
6235                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6236                                                 max_dsc_target_bpp_limit_override,
6237                                                 dsc_max_supported_bw_in_kbps,
6238                                                 &stream->timing,
6239                                                 &stream->timing.dsc_cfg)) {
6240                                         stream->timing.flags.DSC = 1;
6241                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6242                                                                          __func__, drm_connector->name);
6243                                 }
6244                 }
6245         }
6246
6247         /* Overwrite the stream flag if DSC is enabled through debugfs */
6248         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6249                 stream->timing.flags.DSC = 1;
6250
6251         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6252                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6253
6254         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6255                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6256
6257         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6258                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6259 }
6260 #endif /* CONFIG_DRM_AMD_DC_DCN */
6261
6262 /**
6263  * DOC: FreeSync Video
6264  *
6265  * When a userspace application wants to play a video, the content follows a
6266  * standard format definition that usually specifies the FPS for that format.
6267  * The list below illustrates some video formats and their expected FPS:
6269  *
6270  * - TV/NTSC (23.976 FPS)
6271  * - Cinema (24 FPS)
6272  * - TV/PAL (25 FPS)
6273  * - TV/NTSC (29.97 FPS)
6274  * - TV/NTSC (30 FPS)
6275  * - Cinema HFR (48 FPS)
6276  * - TV/PAL (50 FPS)
6277  * - Commonly used (60 FPS)
6278  * - Multiples of 24 (48,72,96,120 FPS)
6279  *
6280  * The list of standard video formats is not huge, so these modes can be
6281  * added to the connector mode list beforehand. With that, userspace can
6282  * leverage FreeSync to extend the front porch and attain the target refresh
6283  * rate. Such a switch will happen seamlessly, without screen blanking or
6284  * reprogramming of the output in any other way. If the userspace requests a
6285  * modesetting change compatible with FreeSync modes that only differ in the
6286  * refresh rate, DC will skip the full update and avoid blink during the
6287  * transition. For example, the video player can change the modesetting from
6288  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6289  * causing any display blink. This same concept can be applied to a mode
6290  * setting change.
6291  */
6292 static struct drm_display_mode *
6293 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6294                           bool use_probed_modes)
6295 {
6296         struct drm_display_mode *m, *m_pref = NULL;
6297         u16 current_refresh, highest_refresh;
6298         struct list_head *list_head = use_probed_modes ?
6299                                                     &aconnector->base.probed_modes :
6300                                                     &aconnector->base.modes;
6301
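             /* Use the cached base mode if a previous call already found it. */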
6302         if (aconnector->freesync_vid_base.clock != 0)
6303                 return &aconnector->freesync_vid_base;
6304
6305         /* Find the preferred mode */
6306         list_for_each_entry(m, list_head, head) {
6307                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6308                         m_pref = m;
6309                         break;
6310                 }
6311         }
6312
6313         if (!m_pref) {
6314                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6315                 m_pref = list_first_entry_or_null(
6316                         &aconnector->base.modes, struct drm_display_mode, head);
6317                 if (!m_pref) {
6318                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6319                         return NULL;
6320                 }
6321         }
6322
6323         highest_refresh = drm_mode_vrefresh(m_pref);
6324
6325         /*
6326          * Find the mode with the highest refresh rate at the same resolution.
6327          * For some monitors, the preferred mode is not the one with the
6328          * highest supported refresh rate.
6329          */
6330         list_for_each_entry(m, list_head, head) {
6331                 current_refresh  = drm_mode_vrefresh(m);
6332
6333                 if (m->hdisplay == m_pref->hdisplay &&
6334                     m->vdisplay == m_pref->vdisplay &&
6335                     highest_refresh < current_refresh) {
6336                         highest_refresh = current_refresh;
6337                         m_pref = m;
6338                 }
6339         }
6340
6341         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6342         return m_pref;
6343 }
6344
6345 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6346                                    struct amdgpu_dm_connector *aconnector)
6347 {
6348         struct drm_display_mode *high_mode;
6349         int timing_diff;
6350
6351         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6352         if (!high_mode || !mode)
6353                 return false;
6354
6355         timing_diff = high_mode->vtotal - mode->vtotal;
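             /*
              * A FreeSync video mode differs from its base mode only in the
              * vertical front porch, so everything must match except vtotal and
              * the vsync positions, which shift by the same delta.
              */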
6356
6357         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6358             high_mode->hdisplay != mode->hdisplay ||
6359             high_mode->vdisplay != mode->vdisplay ||
6360             high_mode->hsync_start != mode->hsync_start ||
6361             high_mode->hsync_end != mode->hsync_end ||
6362             high_mode->htotal != mode->htotal ||
6363             high_mode->hskew != mode->hskew ||
6364             high_mode->vscan != mode->vscan ||
6365             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6366             high_mode->vsync_end - mode->vsync_end != timing_diff)
6367                 return false;
6368         else
6369                 return true;
6370 }
6371
6372 static struct dc_stream_state *
6373 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6374                        const struct drm_display_mode *drm_mode,
6375                        const struct dm_connector_state *dm_state,
6376                        const struct dc_stream_state *old_stream,
6377                        int requested_bpc)
6378 {
6379         struct drm_display_mode *preferred_mode = NULL;
6380         struct drm_connector *drm_connector;
6381         const struct drm_connector_state *con_state =
6382                 dm_state ? &dm_state->base : NULL;
6383         struct dc_stream_state *stream = NULL;
6384         struct drm_display_mode mode = *drm_mode;
6385         struct drm_display_mode saved_mode;
6386         struct drm_display_mode *freesync_mode = NULL;
6387         bool native_mode_found = false;
6388         bool recalculate_timing = false;
6389         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6390         int mode_refresh;
6391         int preferred_refresh = 0;
6392 #if defined(CONFIG_DRM_AMD_DC_DCN)
6393         struct dsc_dec_dpcd_caps dsc_caps;
6394 #endif
6395         struct dc_sink *sink = NULL;
6396
6397         memset(&saved_mode, 0, sizeof(saved_mode));
6398
6399         if (aconnector == NULL) {
6400                 DRM_ERROR("aconnector is NULL!\n");
6401                 return stream;
6402         }
6403
6404         drm_connector = &aconnector->base;
6405
6406         if (!aconnector->dc_sink) {
6407                 sink = create_fake_sink(aconnector);
6408                 if (!sink)
6409                         return stream;
6410         } else {
6411                 sink = aconnector->dc_sink;
6412                 dc_sink_retain(sink);
6413         }
6414
6415         stream = dc_create_stream_for_sink(sink);
6416
6417         if (stream == NULL) {
6418                 DRM_ERROR("Failed to create stream for sink!\n");
6419                 goto finish;
6420         }
6421
6422         stream->dm_stream_context = aconnector;
6423
6424         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6425                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6426
6427         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6428                 /* Search for preferred mode */
6429                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6430                         native_mode_found = true;
6431                         break;
6432                 }
6433         }
6434         if (!native_mode_found)
6435                 preferred_mode = list_first_entry_or_null(
6436                                 &aconnector->base.modes,
6437                                 struct drm_display_mode,
6438                                 head);
6439
6440         mode_refresh = drm_mode_vrefresh(&mode);
6441
6442         if (preferred_mode == NULL) {
6443                 /*
6444                  * This may not be an error: the use case is when we have no
6445                  * usermode calls to reset and set mode upon hotplug. In this
6446                  * case, we call set mode ourselves to restore the previous mode,
6447                  * and the mode list may not be populated in time.
6448                  */
6449                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6450         } else {
6451                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6452                 if (recalculate_timing) {
6453                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6454                         drm_mode_copy(&saved_mode, &mode);
6455                         drm_mode_copy(&mode, freesync_mode);
6456                 } else {
6457                         decide_crtc_timing_for_drm_display_mode(
6458                                 &mode, preferred_mode, scale);
6459
6460                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6461                 }
6462         }
6463
6464         if (recalculate_timing)
6465                 drm_mode_set_crtcinfo(&saved_mode, 0);
6466         else if (!dm_state)
6467                 drm_mode_set_crtcinfo(&mode, 0);
6468
6469         /*
6470          * If scaling is enabled and the refresh rate didn't change,
6471          * copy the VIC and sync polarities from the old timings.
6472          */
6473         if (!scale || mode_refresh != preferred_refresh)
6474                 fill_stream_properties_from_drm_display_mode(
6475                         stream, &mode, &aconnector->base, con_state, NULL,
6476                         requested_bpc);
6477         else
6478                 fill_stream_properties_from_drm_display_mode(
6479                         stream, &mode, &aconnector->base, con_state, old_stream,
6480                         requested_bpc);
6481
6482 #if defined(CONFIG_DRM_AMD_DC_DCN)
6483         /* SST DSC determination policy */
6484         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6485         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6486                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6487 #endif
6488
6489         update_stream_scaling_settings(&mode, dm_state, stream);
6490
6491         fill_audio_info(
6492                 &stream->audio_info,
6493                 drm_connector,
6494                 sink);
6495
6496         update_stream_signal(stream, sink);
6497
6498         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6499                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6500
6501         if (stream->link->psr_settings.psr_feature_enabled) {
6502                 /*
6503                  * Decide whether the stream supports VSC SDP colorimetry
6504                  * before building the VSC info packet.
6505                  */
6506                 stream->use_vsc_sdp_for_colorimetry = false;
6507                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6508                         stream->use_vsc_sdp_for_colorimetry =
6509                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6510                 } else {
6511                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6512                                 stream->use_vsc_sdp_for_colorimetry = true;
6513                 }
6514                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6515                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6516
6517         }
6518 finish:
6519         dc_sink_release(sink);
6520
6521         return stream;
6522 }
6523
6524 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6525 {
6526         drm_crtc_cleanup(crtc);
6527         kfree(crtc);
6528 }
6529
6530 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6531                                   struct drm_crtc_state *state)
6532 {
6533         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6534
6535         /* TODO: Destroy dc_stream objects once the stream object is flattened. */
6536         if (cur->stream)
6537                 dc_stream_release(cur->stream);
6538
6540         __drm_atomic_helper_crtc_destroy_state(state);
6541
6543         kfree(state);
6544 }
6545
6546 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6547 {
6548         struct dm_crtc_state *state;
6549
6550         if (crtc->state)
6551                 dm_crtc_destroy_state(crtc, crtc->state);
6552
6553         state = kzalloc(sizeof(*state), GFP_KERNEL);
6554         if (WARN_ON(!state))
6555                 return;
6556
6557         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6558 }
6559
6560 static struct drm_crtc_state *
6561 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6562 {
6563         struct dm_crtc_state *state, *cur;
6564
6565         if (WARN_ON(!crtc->state))
6566                 return NULL;
6567
6568         cur = to_dm_crtc_state(crtc->state);
6569
6570         state = kzalloc(sizeof(*state), GFP_KERNEL);
6571         if (!state)
6572                 return NULL;
6573
6574         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6575
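        /*
         * The dc_stream is reference counted: the duplicated state takes
         * its own reference here, and each state releases it independently
         * in dm_crtc_destroy_state().
         */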
6576         if (cur->stream) {
6577                 state->stream = cur->stream;
6578                 dc_stream_retain(state->stream);
6579         }
6580
6581         state->active_planes = cur->active_planes;
6582         state->vrr_infopacket = cur->vrr_infopacket;
6583         state->abm_level = cur->abm_level;
6584         state->vrr_supported = cur->vrr_supported;
6585         state->freesync_config = cur->freesync_config;
6586         state->cm_has_degamma = cur->cm_has_degamma;
6587         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6588         state->force_dpms_off = cur->force_dpms_off;
6589         /* TODO: Duplicate dc_stream once the stream object is flattened */
6590
6591         return &state->base;
6592 }
6593
6594 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6595 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6596 {
6597         crtc_debugfs_init(crtc);
6598
6599         return 0;
6600 }
6601 #endif
6602
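/*
 * The dc irq sources are laid out per OTG instance, so this CRTC's
 * VUPDATE source is IRQ_TYPE_VUPDATE offset by its otg_inst; the
 * addition below relies on that per-OTG enum layout.
 */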
6603 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6604 {
6605         enum dc_irq_source irq_source;
6606         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6607         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6608         int rc;
6609
6610         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6611
6612         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6613
6614         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6615                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6616         return rc;
6617 }
6618
6619 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6620 {
6621         enum dc_irq_source irq_source;
6622         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6623         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6624         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6625         struct amdgpu_display_manager *dm = &adev->dm;
6626         struct vblank_control_work *work;
6627         int rc = 0;
6628
6629         if (enable) {
6630                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6631                 if (amdgpu_dm_vrr_active(acrtc_state))
6632                         rc = dm_set_vupdate_irq(crtc, true);
6633         } else {
6634                 /* vblank irq off -> vupdate irq off */
6635                 rc = dm_set_vupdate_irq(crtc, false);
6636         }
6637
6638         if (rc)
6639                 return rc;
6640
6641         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6642
6643         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6644                 return -EBUSY;
6645
6646         if (amdgpu_in_reset(adev))
6647                 return 0;
6648
6649         if (dm->vblank_control_workqueue) {
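                /*
                 * GFP_ATOMIC, because the drm core can call
                 * enable/disable_vblank with vbl_lock held and interrupts
                 * disabled, so sleeping allocations are not safe here.
                 */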
6650                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6651                 if (!work)
6652                         return -ENOMEM;
6653
6654                 INIT_WORK(&work->work, vblank_control_worker);
6655                 work->dm = dm;
6656                 work->acrtc = acrtc;
6657                 work->enable = enable;
6658
6659                 if (acrtc_state->stream) {
6660                         dc_stream_retain(acrtc_state->stream);
6661                         work->stream = acrtc_state->stream;
6662                 }
6663
6664                 queue_work(dm->vblank_control_workqueue, &work->work);
6665         }
6666
6667         return 0;
6668 }
6669
6670 static int dm_enable_vblank(struct drm_crtc *crtc)
6671 {
6672         return dm_set_vblank(crtc, true);
6673 }
6674
6675 static void dm_disable_vblank(struct drm_crtc *crtc)
6676 {
6677         dm_set_vblank(crtc, false);
6678 }
6679
6680 /* Implement only the options currently available for the driver */
6681 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6682         .reset = dm_crtc_reset_state,
6683         .destroy = amdgpu_dm_crtc_destroy,
6684         .set_config = drm_atomic_helper_set_config,
6685         .page_flip = drm_atomic_helper_page_flip,
6686         .atomic_duplicate_state = dm_crtc_duplicate_state,
6687         .atomic_destroy_state = dm_crtc_destroy_state,
6688         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6689         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6690         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6691         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6692         .enable_vblank = dm_enable_vblank,
6693         .disable_vblank = dm_disable_vblank,
6694         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6695 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6696         .late_register = amdgpu_dm_crtc_late_register,
6697 #endif
6698 };
6699
6700 static enum drm_connector_status
6701 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6702 {
6703         bool connected;
6704         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6705
6706         /*
6707          * Notes:
6708          * 1. This interface is NOT called in the context of an HPD irq.
6709          * 2. This interface *is* called in the context of a user-mode ioctl,
6710          * which makes it a bad place for *any* MST-related activity.
6711          */
6712
6713         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6714             !aconnector->fake_enable)
6715                 connected = (aconnector->dc_sink != NULL);
6716         else
6717                 connected = (aconnector->base.force == DRM_FORCE_ON);
6718
6719         update_subconnector_property(aconnector);
6720
6721         return (connected ? connector_status_connected :
6722                         connector_status_disconnected);
6723 }
6724
6725 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6726                                             struct drm_connector_state *connector_state,
6727                                             struct drm_property *property,
6728                                             uint64_t val)
6729 {
6730         struct drm_device *dev = connector->dev;
6731         struct amdgpu_device *adev = drm_to_adev(dev);
6732         struct dm_connector_state *dm_old_state =
6733                 to_dm_connector_state(connector->state);
6734         struct dm_connector_state *dm_new_state =
6735                 to_dm_connector_state(connector_state);
6736
6737         int ret = -EINVAL;
6738
6739         if (property == dev->mode_config.scaling_mode_property) {
6740                 enum amdgpu_rmx_type rmx_type;
6741
6742                 switch (val) {
6743                 case DRM_MODE_SCALE_CENTER:
6744                         rmx_type = RMX_CENTER;
6745                         break;
6746                 case DRM_MODE_SCALE_ASPECT:
6747                         rmx_type = RMX_ASPECT;
6748                         break;
6749                 case DRM_MODE_SCALE_FULLSCREEN:
6750                         rmx_type = RMX_FULL;
6751                         break;
6752                 case DRM_MODE_SCALE_NONE:
6753                 default:
6754                         rmx_type = RMX_OFF;
6755                         break;
6756                 }
6757
6758                 if (dm_old_state->scaling == rmx_type)
6759                         return 0;
6760
6761                 dm_new_state->scaling = rmx_type;
6762                 ret = 0;
6763         } else if (property == adev->mode_info.underscan_hborder_property) {
6764                 dm_new_state->underscan_hborder = val;
6765                 ret = 0;
6766         } else if (property == adev->mode_info.underscan_vborder_property) {
6767                 dm_new_state->underscan_vborder = val;
6768                 ret = 0;
6769         } else if (property == adev->mode_info.underscan_property) {
6770                 dm_new_state->underscan_enable = val;
6771                 ret = 0;
6772         } else if (property == adev->mode_info.abm_level_property) {
6773                 dm_new_state->abm_level = val;
6774                 ret = 0;
6775         }
6776
6777         return ret;
6778 }
6779
6780 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6781                                             const struct drm_connector_state *state,
6782                                             struct drm_property *property,
6783                                             uint64_t *val)
6784 {
6785         struct drm_device *dev = connector->dev;
6786         struct amdgpu_device *adev = drm_to_adev(dev);
6787         struct dm_connector_state *dm_state =
6788                 to_dm_connector_state(state);
6789         int ret = -EINVAL;
6790
6791         if (property == dev->mode_config.scaling_mode_property) {
6792                 switch (dm_state->scaling) {
6793                 case RMX_CENTER:
6794                         *val = DRM_MODE_SCALE_CENTER;
6795                         break;
6796                 case RMX_ASPECT:
6797                         *val = DRM_MODE_SCALE_ASPECT;
6798                         break;
6799                 case RMX_FULL:
6800                         *val = DRM_MODE_SCALE_FULLSCREEN;
6801                         break;
6802                 case RMX_OFF:
6803                 default:
6804                         *val = DRM_MODE_SCALE_NONE;
6805                         break;
6806                 }
6807                 ret = 0;
6808         } else if (property == adev->mode_info.underscan_hborder_property) {
6809                 *val = dm_state->underscan_hborder;
6810                 ret = 0;
6811         } else if (property == adev->mode_info.underscan_vborder_property) {
6812                 *val = dm_state->underscan_vborder;
6813                 ret = 0;
6814         } else if (property == adev->mode_info.underscan_property) {
6815                 *val = dm_state->underscan_enable;
6816                 ret = 0;
6817         } else if (property == adev->mode_info.abm_level_property) {
6818                 *val = dm_state->abm_level;
6819                 ret = 0;
6820         }
6821
6822         return ret;
6823 }
6824
6825 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6826 {
6827         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6828
6829         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6830 }
6831
6832 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6833 {
6834         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6835         const struct dc_link *link = aconnector->dc_link;
6836         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6837         struct amdgpu_display_manager *dm = &adev->dm;
6838         int i;
6839
6840         /*
6841          * Only call this if mst_mgr was initialized earlier, since that is
6842          * not done for all connector types.
6843          */
6844         if (aconnector->mst_mgr.dev)
6845                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6846
6847 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6848         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6849         for (i = 0; i < dm->num_of_edps; i++) {
6850                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6851                         backlight_device_unregister(dm->backlight_dev[i]);
6852                         dm->backlight_dev[i] = NULL;
6853                 }
6854         }
6855 #endif
6856
6857         if (aconnector->dc_em_sink)
6858                 dc_sink_release(aconnector->dc_em_sink);
6859         aconnector->dc_em_sink = NULL;
6860         if (aconnector->dc_sink)
6861                 dc_sink_release(aconnector->dc_sink);
6862         aconnector->dc_sink = NULL;
6863
6864         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6865         drm_connector_unregister(connector);
6866         drm_connector_cleanup(connector);
6867         if (aconnector->i2c) {
6868                 i2c_del_adapter(&aconnector->i2c->base);
6869                 kfree(aconnector->i2c);
6870         }
6871         kfree(aconnector->dm_dp_aux.aux.name);
6872
6873         kfree(connector);
6874 }
6875
6876 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6877 {
6878         struct dm_connector_state *state =
6879                 to_dm_connector_state(connector->state);
6880
6881         if (connector->state)
6882                 __drm_atomic_helper_connector_destroy_state(connector->state);
6883
6884         kfree(state);
6885
6886         state = kzalloc(sizeof(*state), GFP_KERNEL);
6887
6888         if (state) {
6889                 state->scaling = RMX_OFF;
6890                 state->underscan_enable = false;
6891                 state->underscan_hborder = 0;
6892                 state->underscan_vborder = 0;
6893                 state->base.max_requested_bpc = 8;
6894                 state->vcpi_slots = 0;
6895                 state->pbn = 0;
6896                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6897                         state->abm_level = amdgpu_dm_abm_level;
6898
6899                 __drm_atomic_helper_connector_reset(connector, &state->base);
6900         }
6901 }
6902
6903 struct drm_connector_state *
6904 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6905 {
6906         struct dm_connector_state *state =
6907                 to_dm_connector_state(connector->state);
6908
6909         struct dm_connector_state *new_state =
6910                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6911
6912         if (!new_state)
6913                 return NULL;
6914
6915         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6916
6917         new_state->freesync_capable = state->freesync_capable;
6918         new_state->abm_level = state->abm_level;
6919         new_state->scaling = state->scaling;
6920         new_state->underscan_enable = state->underscan_enable;
6921         new_state->underscan_hborder = state->underscan_hborder;
6922         new_state->underscan_vborder = state->underscan_vborder;
6923         new_state->vcpi_slots = state->vcpi_slots;
6924         new_state->pbn = state->pbn;
6925         return &new_state->base;
6926 }
6927
6928 static int
6929 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6930 {
6931         struct amdgpu_dm_connector *amdgpu_dm_connector =
6932                 to_amdgpu_dm_connector(connector);
6933         int r;
6934
6935         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6936             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6937                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6938                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6939                 if (r)
6940                         return r;
6941         }
6942
6943 #if defined(CONFIG_DEBUG_FS)
6944         connector_debugfs_init(amdgpu_dm_connector);
6945 #endif
6946
6947         return 0;
6948 }
6949
6950 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6951         .reset = amdgpu_dm_connector_funcs_reset,
6952         .detect = amdgpu_dm_connector_detect,
6953         .fill_modes = drm_helper_probe_single_connector_modes,
6954         .destroy = amdgpu_dm_connector_destroy,
6955         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6956         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6957         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6958         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6959         .late_register = amdgpu_dm_connector_late_register,
6960         .early_unregister = amdgpu_dm_connector_unregister
6961 };
6962
6963 static int get_modes(struct drm_connector *connector)
6964 {
6965         return amdgpu_dm_connector_get_modes(connector);
6966 }
6967
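/*
 * Build an emulated DC sink (SIGNAL_TYPE_VIRTUAL) from the EDID that
 * userspace forced on the connector, so modes can be exposed without a
 * physically attached display.
 */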
6968 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6969 {
6970         struct dc_sink_init_data init_params = {
6971                         .link = aconnector->dc_link,
6972                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6973         };
6974         struct edid *edid;
6975
6976         if (!aconnector->base.edid_blob_ptr) {
6977                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6978                                 aconnector->base.name);
6979
6980                 aconnector->base.force = DRM_FORCE_OFF;
6981                 aconnector->base.override_edid = false;
6982                 return;
6983         }
6984
6985         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6986
6987         aconnector->edid = edid;
6988
6989         aconnector->dc_em_sink = dc_link_add_remote_sink(
6990                 aconnector->dc_link,
6991                 (uint8_t *)edid,
6992                 (edid->extensions + 1) * EDID_LENGTH,
6993                 &init_params);
6994
6995         if (aconnector->base.force == DRM_FORCE_ON) {
6996                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6997                 aconnector->dc_link->local_sink :
6998                 aconnector->dc_em_sink;
6999                 dc_sink_retain(aconnector->dc_sink);
7000         }
7001 }
7002
7003 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7004 {
7005         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7006
7007         /*
7008          * In case of a headless boot with force-on for a DP-managed connector,
7009          * these settings have to be != 0 to get an initial modeset.
7010          */
7011         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7012                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7013                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7014         }
7015
7016
7017         aconnector->base.override_edid = true;
7018         create_eml_sink(aconnector);
7019 }
7020
7021 struct dc_stream_state *
7022 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7023                                 const struct drm_display_mode *drm_mode,
7024                                 const struct dm_connector_state *dm_state,
7025                                 const struct dc_stream_state *old_stream)
7026 {
7027         struct drm_connector *connector = &aconnector->base;
7028         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7029         struct dc_stream_state *stream;
7030         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7031         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7032         enum dc_status dc_result = DC_OK;
7033
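        /*
         * Walk down the colour depth ladder until DC accepts the stream:
         * start from the connector's max_requested_bpc and retry in steps
         * of 2 (e.g. 10 -> 8 -> 6), never going below 6 bpc.
         */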
7034         do {
7035                 stream = create_stream_for_sink(aconnector, drm_mode,
7036                                                 dm_state, old_stream,
7037                                                 requested_bpc);
7038                 if (stream == NULL) {
7039                         DRM_ERROR("Failed to create stream for sink!\n");
7040                         break;
7041                 }
7042
7043                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7044
7045                 if (dc_result != DC_OK) {
7046                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7047                                       drm_mode->hdisplay,
7048                                       drm_mode->vdisplay,
7049                                       drm_mode->clock,
7050                                       dc_result,
7051                                       dc_status_to_str(dc_result));
7052
7053                         dc_stream_release(stream);
7054                         stream = NULL;
7055                         requested_bpc -= 2; /* lower bpc to retry validation */
7056                 }
7057
7058         } while (stream == NULL && requested_bpc >= 6);
7059
7060         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7061                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7062
7063                 aconnector->force_yuv420_output = true;
7064                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7065                                                 dm_state, old_stream);
7066                 aconnector->force_yuv420_output = false;
7067         }
7068
7069         return stream;
7070 }
7071
7072 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7073                                    struct drm_display_mode *mode)
7074 {
7075         int result = MODE_ERROR;
7076         struct dc_sink *dc_sink;
7077         /* TODO: Unhardcode stream count */
7078         struct dc_stream_state *stream;
7079         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7080
7081         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7082                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7083                 return result;
7084
7085         /*
7086          * Only run this the first time mode_valid is called, to initialize
7087          * EDID management.
7088          */
7089         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7090                 !aconnector->dc_em_sink)
7091                 handle_edid_mgmt(aconnector);
7092
7093         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7094
7095         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7096                                 aconnector->base.force != DRM_FORCE_ON) {
7097                 DRM_ERROR("dc_sink is NULL!\n");
7098                 goto fail;
7099         }
7100
7101         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7102         if (stream) {
7103                 dc_stream_release(stream);
7104                 result = MODE_OK;
7105         }
7106
7107 fail:
7108         /* TODO: error handling */
7109         return result;
7110 }
7111
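/*
 * Pack the connector's HDR static metadata into a DC info packet. The
 * HDMI Dynamic Range and Mastering infoframe is a fixed 26-byte payload
 * plus a 4-byte header; only the header layout differs between HDMI
 * infoframes and DP/eDP SDPs, which is what the switch below handles.
 */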
7112 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7113                                 struct dc_info_packet *out)
7114 {
7115         struct hdmi_drm_infoframe frame;
7116         unsigned char buf[30]; /* 26 + 4 */
7117         ssize_t len;
7118         int ret, i;
7119
7120         memset(out, 0, sizeof(*out));
7121
7122         if (!state->hdr_output_metadata)
7123                 return 0;
7124
7125         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7126         if (ret)
7127                 return ret;
7128
7129         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7130         if (len < 0)
7131                 return (int)len;
7132
7133         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7134         if (len != 30)
7135                 return -EINVAL;
7136
7137         /* Prepare the infopacket for DC. */
7138         switch (state->connector->connector_type) {
7139         case DRM_MODE_CONNECTOR_HDMIA:
7140                 out->hb0 = 0x87; /* type */
7141                 out->hb1 = 0x01; /* version */
7142                 out->hb2 = 0x1A; /* length */
7143                 out->sb[0] = buf[3]; /* checksum */
7144                 i = 1;
7145                 break;
7146
7147         case DRM_MODE_CONNECTOR_DisplayPort:
7148         case DRM_MODE_CONNECTOR_eDP:
7149                 out->hb0 = 0x00; /* sdp id, zero */
7150                 out->hb1 = 0x87; /* type */
7151                 out->hb2 = 0x1D; /* payload len - 1 */
7152                 out->hb3 = (0x13 << 2); /* sdp version */
7153                 out->sb[0] = 0x01; /* version */
7154                 out->sb[1] = 0x1A; /* length */
7155                 i = 2;
7156                 break;
7157
7158         default:
7159                 return -EINVAL;
7160         }
7161
7162         memcpy(&out->sb[i], &buf[4], 26);
7163         out->valid = true;
7164
7165         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7166                        sizeof(out->sb), false);
7167
7168         return 0;
7169 }
7170
7171 static int
7172 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7173                                  struct drm_atomic_state *state)
7174 {
7175         struct drm_connector_state *new_con_state =
7176                 drm_atomic_get_new_connector_state(state, conn);
7177         struct drm_connector_state *old_con_state =
7178                 drm_atomic_get_old_connector_state(state, conn);
7179         struct drm_crtc *crtc = new_con_state->crtc;
7180         struct drm_crtc_state *new_crtc_state;
7181         int ret;
7182
7183         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7184
7185         if (!crtc)
7186                 return 0;
7187
7188         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7189                 struct dc_info_packet hdr_infopacket;
7190
7191                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7192                 if (ret)
7193                         return ret;
7194
7195                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7196                 if (IS_ERR(new_crtc_state))
7197                         return PTR_ERR(new_crtc_state);
7198
7199                 /*
7200                  * DC considers the stream backends changed if the
7201                  * static metadata changes. Forcing the modeset also
7202                  * gives a simple way for userspace to switch from
7203                  * 8bpc to 10bpc when setting the metadata to enter
7204                  * or exit HDR.
7205                  *
7206                  * Changing the static metadata after it's been
7207                  * set is permissible, however. So only force a
7208                  * modeset if we're entering or exiting HDR.
7209                  */
7210                 new_crtc_state->mode_changed =
7211                         !old_con_state->hdr_output_metadata ||
7212                         !new_con_state->hdr_output_metadata;
7213         }
7214
7215         return 0;
7216 }
7217
7218 static const struct drm_connector_helper_funcs
7219 amdgpu_dm_connector_helper_funcs = {
7220         /*
7221          * When hotplugging a second, larger display in fbcon mode, the larger
7222          * modes are filtered out by drm_mode_validate_size() and go missing
7223          * once the user starts lightdm. So the mode list must be rebuilt in
7224          * the get_modes callback, not just the mode count returned.
7225          */
7226         .get_modes = get_modes,
7227         .mode_valid = amdgpu_dm_connector_mode_valid,
7228         .atomic_check = amdgpu_dm_connector_atomic_check,
7229 };
7230
7231 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7232 {
7233 }
7234
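/*
 * Count the non-cursor planes that will be enabled on the CRTC after this
 * commit; planes untouched by the atomic state are assumed to keep their
 * previously validated (enabled) configuration.
 */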
7235 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7236 {
7237         struct drm_atomic_state *state = new_crtc_state->state;
7238         struct drm_plane *plane;
7239         int num_active = 0;
7240
7241         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7242                 struct drm_plane_state *new_plane_state;
7243
7244                 /* Cursor planes are "fake". */
7245                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7246                         continue;
7247
7248                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7249
7250                 if (!new_plane_state) {
7251                         /*
7252                          * The plane is enabled on the CRTC and hasn't changed
7253                          * state. This means that it previously passed
7254                          * validation and is therefore enabled.
7255                          */
7256                         num_active += 1;
7257                         continue;
7258                 }
7259
7260                 /* We need a framebuffer to be considered enabled. */
7261                 num_active += (new_plane_state->fb != NULL);
7262         }
7263
7264         return num_active;
7265 }
7266
7267 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7268                                          struct drm_crtc_state *new_crtc_state)
7269 {
7270         struct dm_crtc_state *dm_new_crtc_state =
7271                 to_dm_crtc_state(new_crtc_state);
7272
7273         dm_new_crtc_state->active_planes = 0;
7274
7275         if (!dm_new_crtc_state->stream)
7276                 return;
7277
7278         dm_new_crtc_state->active_planes =
7279                 count_crtc_active_planes(new_crtc_state);
7280 }
7281
7282 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7283                                        struct drm_atomic_state *state)
7284 {
7285         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7286                                                                           crtc);
7287         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7288         struct dc *dc = adev->dm.dc;
7289         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7290         int ret = -EINVAL;
7291
7292         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7293
7294         dm_update_crtc_active_planes(crtc, crtc_state);
7295
7296         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7297                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7298                 return ret;
7299         }
7300
7301         /*
7302          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7303          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7304          * planes are disabled, which is not supported by the hardware. And there is legacy
7305          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7306          */
7307         if (crtc_state->enable &&
7308             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7309                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7310                 return -EINVAL;
7311         }
7312
7313         /* In some use cases, like reset, no stream is attached */
7314         if (!dm_crtc_state->stream)
7315                 return 0;
7316
7317         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7318                 return 0;
7319
7320         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7321         return ret;
7322 }
7323
7324 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7325                                       const struct drm_display_mode *mode,
7326                                       struct drm_display_mode *adjusted_mode)
7327 {
7328         return true;
7329 }
7330
7331 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7332         .disable = dm_crtc_helper_disable,
7333         .atomic_check = dm_crtc_helper_atomic_check,
7334         .mode_fixup = dm_crtc_helper_mode_fixup,
7335         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7336 };
7337
7338 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7339 {
7340
7341 }
7342
7343 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7344 {
7345         switch (display_color_depth) {
7346         case COLOR_DEPTH_666:
7347                 return 6;
7348         case COLOR_DEPTH_888:
7349                 return 8;
7350         case COLOR_DEPTH_101010:
7351                 return 10;
7352         case COLOR_DEPTH_121212:
7353                 return 12;
7354         case COLOR_DEPTH_141414:
7355                 return 14;
7356         case COLOR_DEPTH_161616:
7357                 return 16;
7358         default:
7359                 break;
7360         }
7361
7362         return 0;
7363 }
7363
7364 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7365                                           struct drm_crtc_state *crtc_state,
7366                                           struct drm_connector_state *conn_state)
7367 {
7368         struct drm_atomic_state *state = crtc_state->state;
7369         struct drm_connector *connector = conn_state->connector;
7370         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7371         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7372         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7373         struct drm_dp_mst_topology_mgr *mst_mgr;
7374         struct drm_dp_mst_port *mst_port;
7375         enum dc_color_depth color_depth;
7376         int clock, bpp = 0;
7377         bool is_y420 = false;
7378
7379         if (!aconnector->port || !aconnector->dc_sink)
7380                 return 0;
7381
7382         mst_port = aconnector->port;
7383         mst_mgr = &aconnector->mst_port->mst_mgr;
7384
7385         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7386                 return 0;
7387
7388         if (!state->duplicated) {
7389                 int max_bpc = conn_state->max_requested_bpc;
7390                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7391                                 aconnector->force_yuv420_output;
7392                 color_depth = convert_color_depth_from_display_info(connector,
7393                                                                     is_y420,
7394                                                                     max_bpc);
7395                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7396                 clock = adjusted_mode->clock;
7397                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7398         }
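        /*
         * Illustrative numbers only: a 1080p60 stream (148500 kHz) at
         * 24 bpp comes out to roughly 532 PBN from drm_dp_calc_pbn_mode()
         * (including the helper's 0.6% margin); the atomic helper below
         * then converts PBN into time slots using the link's PBN divider.
         */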
7399         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7400                                                                            mst_mgr,
7401                                                                            mst_port,
7402                                                                            dm_new_connector_state->pbn,
7403                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7404         if (dm_new_connector_state->vcpi_slots < 0) {
7405                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7406                 return dm_new_connector_state->vcpi_slots;
7407         }
7408         return 0;
7409 }
7410
7411 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7412         .disable = dm_encoder_helper_disable,
7413         .atomic_check = dm_encoder_helper_atomic_check
7414 };
7415
7416 #if defined(CONFIG_DRM_AMD_DC_DCN)
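/*
 * Write the PBN/time-slot allocations computed by the MST DSC fairness
 * code back into each connector's atomic state, enabling DSC on ports
 * whose stream timing has DSC set and disabling it otherwise.
 */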
7417 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7418                                             struct dc_state *dc_state,
7419                                             struct dsc_mst_fairness_vars *vars)
7420 {
7421         struct dc_stream_state *stream = NULL;
7422         struct drm_connector *connector;
7423         struct drm_connector_state *new_con_state;
7424         struct amdgpu_dm_connector *aconnector;
7425         struct dm_connector_state *dm_conn_state;
7426         int i, j;
7427         int vcpi, pbn_div, pbn, slot_num = 0;
7428
7429         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7430
7431                 aconnector = to_amdgpu_dm_connector(connector);
7432
7433                 if (!aconnector->port)
7434                         continue;
7435
7436                 if (!new_con_state || !new_con_state->crtc)
7437                         continue;
7438
7439                 dm_conn_state = to_dm_connector_state(new_con_state);
7440
7441                 for (j = 0; j < dc_state->stream_count; j++) {
7442                         stream = dc_state->streams[j];
7443                         if (!stream)
7444                                 continue;
7445
7446                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7447                                 break;
7448
7449                         stream = NULL;
7450                 }
7451
7452                 if (!stream)
7453                         continue;
7454
7455                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7456                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7457                 for (j = 0; j < dc_state->stream_count; j++) {
7458                         if (vars[j].aconnector == aconnector) {
7459                                 pbn = vars[j].pbn;
7460                                 break;
7461                         }
7462                 }
7463
7464                 if (j == dc_state->stream_count)
7465                         continue;
7466
7467                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
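                /*
                 * Example, assuming a 4-lane HBR2 link: pbn_div is 40 PBN
                 * per time slot (2560 PBN across the 64 slots of an MTP),
                 * so a ~532 PBN stream needs DIV_ROUND_UP(532, 40) = 14
                 * slots.
                 */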
7468
7469                 if (stream->timing.flags.DSC != 1) {
7470                         dm_conn_state->pbn = pbn;
7471                         dm_conn_state->vcpi_slots = slot_num;
7472
7473                         drm_dp_mst_atomic_enable_dsc(state,
7474                                                      aconnector->port,
7475                                                      dm_conn_state->pbn,
7476                                                      0,
7477                                                      false);
7478                         continue;
7479                 }
7480
7481                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7482                                                     aconnector->port,
7483                                                     pbn, pbn_div,
7484                                                     true);
7485                 if (vcpi < 0)
7486                         return vcpi;
7487
7488                 dm_conn_state->pbn = pbn;
7489                 dm_conn_state->vcpi_slots = vcpi;
7490         }
7491         return 0;
7492 }
7493 #endif
7494
7495 static void dm_drm_plane_reset(struct drm_plane *plane)
7496 {
7497         struct dm_plane_state *amdgpu_state = NULL;
7498
7499         if (plane->state)
7500                 plane->funcs->atomic_destroy_state(plane, plane->state);
7501
7502         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7503         WARN_ON(amdgpu_state == NULL);
7504
7505         if (amdgpu_state)
7506                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7507 }
7508
7509 static struct drm_plane_state *
7510 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7511 {
7512         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7513
7514         old_dm_plane_state = to_dm_plane_state(plane->state);
7515         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7516         if (!dm_plane_state)
7517                 return NULL;
7518
7519         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7520
7521         if (old_dm_plane_state->dc_state) {
7522                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7523                 dc_plane_state_retain(dm_plane_state->dc_state);
7524         }
7525
7526         return &dm_plane_state->base;
7527 }
7528
7529 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7530                                 struct drm_plane_state *state)
7531 {
7532         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7533
7534         if (dm_plane_state->dc_state)
7535                 dc_plane_state_release(dm_plane_state->dc_state);
7536
7537         drm_atomic_helper_plane_destroy_state(plane, state);
7538 }
7539
7540 static const struct drm_plane_funcs dm_plane_funcs = {
7541         .update_plane   = drm_atomic_helper_update_plane,
7542         .disable_plane  = drm_atomic_helper_disable_plane,
7543         .destroy        = drm_primary_helper_destroy,
7544         .reset = dm_drm_plane_reset,
7545         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7546         .atomic_destroy_state = dm_drm_plane_destroy_state,
7547         .format_mod_supported = dm_plane_format_mod_supported,
7548 };
7549
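/*
 * Pin the framebuffer's BO (VRAM for cursor planes, otherwise any
 * display-capable domain) and bind it into GART so a valid DC address
 * exists before the commit programs the hardware.
 */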
7550 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7551                                       struct drm_plane_state *new_state)
7552 {
7553         struct amdgpu_framebuffer *afb;
7554         struct drm_gem_object *obj;
7555         struct amdgpu_device *adev;
7556         struct amdgpu_bo *rbo;
7557         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7558         uint32_t domain;
7559         int r;
7560
7561         if (!new_state->fb) {
7562                 DRM_DEBUG_KMS("No FB bound\n");
7563                 return 0;
7564         }
7565
7566         afb = to_amdgpu_framebuffer(new_state->fb);
7567         obj = new_state->fb->obj[0];
7568         rbo = gem_to_amdgpu_bo(obj);
7569         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7570
7571         r = amdgpu_bo_reserve(rbo, true);
7572         if (r) {
7573                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7574                 return r;
7575         }
7576
7577         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7578         if (r) {
7579                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7580                 goto error_unlock;
7581         }
7582
7583         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7584                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7585         else
7586                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7587
7588         r = amdgpu_bo_pin(rbo, domain);
7589         if (unlikely(r != 0)) {
7590                 if (r != -ERESTARTSYS)
7591                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7592                 goto error_unlock;
7593         }
7594
7595         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7596         if (unlikely(r != 0)) {
7597                 DRM_ERROR("%p bind failed\n", rbo);
7598                 goto error_unpin;
7599         }
7600
7601         amdgpu_bo_unreserve(rbo);
7602
7603         afb->address = amdgpu_bo_gpu_offset(rbo);
7604
7605         amdgpu_bo_ref(rbo);
7606
7607         /*
7608          * We don't do surface updates on planes that have been newly created,
7609          * but we also don't have the afb->address during atomic check.
7610          *
7611          * Fill in buffer attributes depending on the address here, but only on
7612          * newly created planes since they're not being used by DC yet and this
7613          * won't modify global state.
7614          */
7615         dm_plane_state_old = to_dm_plane_state(plane->state);
7616         dm_plane_state_new = to_dm_plane_state(new_state);
7617
7618         if (dm_plane_state_new->dc_state &&
7619             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7620                 struct dc_plane_state *plane_state =
7621                         dm_plane_state_new->dc_state;
7622                 bool force_disable_dcc = !plane_state->dcc.enable;
7623
7624                 fill_plane_buffer_attributes(
7625                         adev, afb, plane_state->format, plane_state->rotation,
7626                         afb->tiling_flags,
7627                         &plane_state->tiling_info, &plane_state->plane_size,
7628                         &plane_state->dcc, &plane_state->address,
7629                         afb->tmz_surface, force_disable_dcc);
7630         }
7631
7632         return 0;
7633
7634 error_unpin:
7635         amdgpu_bo_unpin(rbo);
7636
7637 error_unlock:
7638         amdgpu_bo_unreserve(rbo);
7639         return r;
7640 }
7641
7642 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7643                                        struct drm_plane_state *old_state)
7644 {
7645         struct amdgpu_bo *rbo;
7646         int r;
7647
7648         if (!old_state->fb)
7649                 return;
7650
7651         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7652         r = amdgpu_bo_reserve(rbo, false);
7653         if (unlikely(r)) {
7654                 DRM_ERROR("failed to reserve rbo before unpin\n");
7655                 return;
7656         }
7657
7658         amdgpu_bo_unpin(rbo);
7659         amdgpu_bo_unreserve(rbo);
7660         amdgpu_bo_unref(&rbo);
7661 }
7662
7663 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7664                                        struct drm_crtc_state *new_crtc_state)
7665 {
7666         struct drm_framebuffer *fb = state->fb;
7667         int min_downscale, max_upscale;
7668         int min_scale = 0;
7669         int max_scale = INT_MAX;
7670
7671         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7672         if (fb && state->crtc) {
7673                 /* Validate viewport to cover the case when only the position changes */
7674                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7675                         int viewport_width = state->crtc_w;
7676                         int viewport_height = state->crtc_h;
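                        /*
                         * The viewport is the on-screen part of the plane:
                         * e.g. crtc_x = -100 with crtc_w = 300 leaves a
                         * 200 px wide viewport, and a negative result means
                         * the plane lies completely off screen.
                         */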
7677
7678                         if (state->crtc_x < 0)
7679                                 viewport_width += state->crtc_x;
7680                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7681                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7682
7683                         if (state->crtc_y < 0)
7684                                 viewport_height += state->crtc_y;
7685                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7686                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7687
7688                         if (viewport_width < 0 || viewport_height < 0) {
7689                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7690                                 return -EINVAL;
7691                         } else if (viewport_width < MIN_VIEWPORT_SIZE * 2) { /* x2 for width because of pipe split */
7692                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE * 2);
7693                                 return -EINVAL;
7694                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7695                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7696                                 return -EINVAL;
7697                         }
7698
7699                 }
7700
7701                 /* Get min/max allowed scaling factors from plane caps. */
7702                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7703                                              &min_downscale, &max_upscale);
7704                 /*
7705                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7706                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7707                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7708                  */
7709                 min_scale = (1000 << 16) / max_upscale;
7710                 max_scale = (1000 << 16) / min_downscale;
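                /*
                 * Worked example with hypothetical caps: max_upscale = 4000
                 * (4x, since dc uses 1.0 == 1000) gives min_scale =
                 * (1000 << 16) / 4000 = 0x4000, i.e. 0.25 in drm 16.16
                 * fixed point.
                 */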
7711         }
7712
7713         return drm_atomic_helper_check_plane_state(
7714                 state, new_crtc_state, min_scale, max_scale, true, true);
7715 }
7716
7717 static int dm_plane_atomic_check(struct drm_plane *plane,
7718                                  struct drm_atomic_state *state)
7719 {
7720         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7721                                                                                  plane);
7722         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7723         struct dc *dc = adev->dm.dc;
7724         struct dm_plane_state *dm_plane_state;
7725         struct dc_scaling_info scaling_info;
7726         struct drm_crtc_state *new_crtc_state;
7727         int ret;
7728
7729         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7730
7731         dm_plane_state = to_dm_plane_state(new_plane_state);
7732
7733         if (!dm_plane_state->dc_state)
7734                 return 0;
7735
7736         new_crtc_state =
7737                 drm_atomic_get_new_crtc_state(state,
7738                                               new_plane_state->crtc);
7739         if (!new_crtc_state)
7740                 return -EINVAL;
7741
7742         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7743         if (ret)
7744                 return ret;
7745
7746         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7747         if (ret)
7748                 return ret;
7749
7750         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7751                 return 0;
7752
7753         return -EINVAL;
7754 }
7755
7756 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7757                                        struct drm_atomic_state *state)
7758 {
7759         /* Only support async updates on cursor planes. */
7760         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7761                 return -EINVAL;
7762
7763         return 0;
7764 }
7765
7766 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7767                                          struct drm_atomic_state *state)
7768 {
7769         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7770                                                                            plane);
7771         struct drm_plane_state *old_state =
7772                 drm_atomic_get_old_plane_state(state, plane);
7773
7774         trace_amdgpu_dm_atomic_update_cursor(new_state);
7775
7776         swap(plane->state->fb, new_state->fb);
7777
7778         plane->state->src_x = new_state->src_x;
7779         plane->state->src_y = new_state->src_y;
7780         plane->state->src_w = new_state->src_w;
7781         plane->state->src_h = new_state->src_h;
7782         plane->state->crtc_x = new_state->crtc_x;
7783         plane->state->crtc_y = new_state->crtc_y;
7784         plane->state->crtc_w = new_state->crtc_w;
7785         plane->state->crtc_h = new_state->crtc_h;
7786
7787         handle_cursor_update(plane, old_state);
7788 }
7789
7790 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7791         .prepare_fb = dm_plane_helper_prepare_fb,
7792         .cleanup_fb = dm_plane_helper_cleanup_fb,
7793         .atomic_check = dm_plane_atomic_check,
7794         .atomic_async_check = dm_plane_atomic_async_check,
7795         .atomic_async_update = dm_plane_atomic_async_update
7796 };
7797
7798 /*
7799  * TODO: These are currently initialized to RGB formats only.
7800  * For future use cases we should either initialize them dynamically based on
7801  * plane capabilities, or initialize this array to all formats, so the internal
7802  * drm check will succeed, and let DC implement the proper checks.
7803  */
7804 static const uint32_t rgb_formats[] = {
7805         DRM_FORMAT_XRGB8888,
7806         DRM_FORMAT_ARGB8888,
7807         DRM_FORMAT_RGBA8888,
7808         DRM_FORMAT_XRGB2101010,
7809         DRM_FORMAT_XBGR2101010,
7810         DRM_FORMAT_ARGB2101010,
7811         DRM_FORMAT_ABGR2101010,
7812         DRM_FORMAT_XRGB16161616,
7813         DRM_FORMAT_XBGR16161616,
7814         DRM_FORMAT_ARGB16161616,
7815         DRM_FORMAT_ABGR16161616,
7816         DRM_FORMAT_XBGR8888,
7817         DRM_FORMAT_ABGR8888,
7818         DRM_FORMAT_RGB565,
7819 };
7820
7821 static const uint32_t overlay_formats[] = {
7822         DRM_FORMAT_XRGB8888,
7823         DRM_FORMAT_ARGB8888,
7824         DRM_FORMAT_RGBA8888,
7825         DRM_FORMAT_XBGR8888,
7826         DRM_FORMAT_ABGR8888,
7827         DRM_FORMAT_RGB565
7828 };
7829
7830 static const u32 cursor_formats[] = {
7831         DRM_FORMAT_ARGB8888
7832 };
7833
7834 static int get_plane_formats(const struct drm_plane *plane,
7835                              const struct dc_plane_cap *plane_cap,
7836                              uint32_t *formats, int max_formats)
7837 {
7838         int i, num_formats = 0;
7839
7840         /*
7841          * TODO: Query support for each group of formats directly from
7842          * DC plane caps. This will require adding more formats to the
7843          * caps list.
7844          */
7845
7846         switch (plane->type) {
7847         case DRM_PLANE_TYPE_PRIMARY:
7848                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7849                         if (num_formats >= max_formats)
7850                                 break;
7851
7852                         formats[num_formats++] = rgb_formats[i];
7853                 }
7854
7855                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7856                         formats[num_formats++] = DRM_FORMAT_NV12;
7857                 if (plane_cap && plane_cap->pixel_format_support.p010)
7858                         formats[num_formats++] = DRM_FORMAT_P010;
7859                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7860                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7861                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7862                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7863                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7864                 }
7865                 break;
7866
7867         case DRM_PLANE_TYPE_OVERLAY:
7868                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7869                         if (num_formats >= max_formats)
7870                                 break;
7871
7872                         formats[num_formats++] = overlay_formats[i];
7873                 }
7874                 break;
7875
7876         case DRM_PLANE_TYPE_CURSOR:
7877                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7878                         if (num_formats >= max_formats)
7879                                 break;
7880
7881                         formats[num_formats++] = cursor_formats[i];
7882                 }
7883                 break;
7884         }
7885
7886         return num_formats;
7887 }
7888
7889 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7890                                 struct drm_plane *plane,
7891                                 unsigned long possible_crtcs,
7892                                 const struct dc_plane_cap *plane_cap)
7893 {
7894         uint32_t formats[32];
7895         int num_formats;
7896         int res = -EPERM;
7897         unsigned int supported_rotations;
7898         uint64_t *modifiers = NULL;
7899
7900         num_formats = get_plane_formats(plane, plane_cap, formats,
7901                                         ARRAY_SIZE(formats));
7902
7903         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7904         if (res)
7905                 return res;
7906
7907         if (modifiers == NULL)
7908                 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;
7909
7910         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7911                                        &dm_plane_funcs, formats, num_formats,
7912                                        modifiers, plane->type, NULL);
7913         kfree(modifiers);
7914         if (res)
7915                 return res;
7916
7917         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7918             plane_cap && plane_cap->per_pixel_alpha) {
7919                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7920                                           BIT(DRM_MODE_BLEND_PREMULTI) |
7921                                           BIT(DRM_MODE_BLEND_COVERAGE);
7922
7923                 drm_plane_create_alpha_property(plane);
7924                 drm_plane_create_blend_mode_property(plane, blend_caps);
7925         }
7926
7927         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7928             plane_cap &&
7929             (plane_cap->pixel_format_support.nv12 ||
7930              plane_cap->pixel_format_support.p010)) {
7931                 /* This only affects YUV formats. */
7932                 drm_plane_create_color_properties(
7933                         plane,
7934                         BIT(DRM_COLOR_YCBCR_BT601) |
7935                         BIT(DRM_COLOR_YCBCR_BT709) |
7936                         BIT(DRM_COLOR_YCBCR_BT2020),
7937                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7938                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7939                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7940         }
7941
7942         supported_rotations =
7943                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7944                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7945
7946         if (dm->adev->asic_type >= CHIP_BONAIRE &&
7947             plane->type != DRM_PLANE_TYPE_CURSOR)
7948                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7949                                                    supported_rotations);
7950
7951         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7952
7953         /* Create (reset) the plane state */
7954         if (plane->funcs->reset)
7955                 plane->funcs->reset(plane);
7956
7957         return 0;
7958 }
7959
7960 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7961                                struct drm_plane *plane,
7962                                uint32_t crtc_index)
7963 {
7964         struct amdgpu_crtc *acrtc = NULL;
7965         struct drm_plane *cursor_plane;
7966
7967         int res = -ENOMEM;
7968
7969         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7970         if (!cursor_plane)
7971                 goto fail;
7972
7973         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7974         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7975         if (res)
7976                 goto fail;
7975
7976         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7977         if (!acrtc)
7978                 goto fail;
7979
7980         res = drm_crtc_init_with_planes(
7981                         dm->ddev,
7982                         &acrtc->base,
7983                         plane,
7984                         cursor_plane,
7985                         &amdgpu_dm_crtc_funcs, NULL);
7986
7987         if (res)
7988                 goto fail;
7989
7990         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7991
7992         /* Create (reset) the crtc state */
7993         if (acrtc->base.funcs->reset)
7994                 acrtc->base.funcs->reset(&acrtc->base);
7995
7996         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7997         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7998
7999         acrtc->crtc_id = crtc_index;
8000         acrtc->base.enabled = false;
8001         acrtc->otg_inst = -1;
8002
8003         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
8004         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
8005                                    true, MAX_COLOR_LUT_ENTRIES);
8006         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
8007
8008         return 0;
8009
8010 fail:
8011         kfree(acrtc);
8012         kfree(cursor_plane);
8013         return res;
8014 }
8015
8016
8017 static int to_drm_connector_type(enum signal_type st)
8018 {
8019         switch (st) {
8020         case SIGNAL_TYPE_HDMI_TYPE_A:
8021                 return DRM_MODE_CONNECTOR_HDMIA;
8022         case SIGNAL_TYPE_EDP:
8023                 return DRM_MODE_CONNECTOR_eDP;
8024         case SIGNAL_TYPE_LVDS:
8025                 return DRM_MODE_CONNECTOR_LVDS;
8026         case SIGNAL_TYPE_RGB:
8027                 return DRM_MODE_CONNECTOR_VGA;
8028         case SIGNAL_TYPE_DISPLAY_PORT:
8029         case SIGNAL_TYPE_DISPLAY_PORT_MST:
8030                 return DRM_MODE_CONNECTOR_DisplayPort;
8031         case SIGNAL_TYPE_DVI_DUAL_LINK:
8032         case SIGNAL_TYPE_DVI_SINGLE_LINK:
8033                 return DRM_MODE_CONNECTOR_DVID;
8034         case SIGNAL_TYPE_VIRTUAL:
8035                 return DRM_MODE_CONNECTOR_VIRTUAL;
8036
8037         default:
8038                 return DRM_MODE_CONNECTOR_Unknown;
8039         }
8040 }
8041
8042 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
8043 {
8044         struct drm_encoder *encoder;
8045
8046         /* There is only one encoder per connector */
8047         drm_connector_for_each_possible_encoder(connector, encoder)
8048                 return encoder;
8049
8050         return NULL;
8051 }
8052
8053 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8054 {
8055         struct drm_encoder *encoder;
8056         struct amdgpu_encoder *amdgpu_encoder;
8057
8058         encoder = amdgpu_dm_connector_to_encoder(connector);
8059
8060         if (encoder == NULL)
8061                 return;
8062
8063         amdgpu_encoder = to_amdgpu_encoder(encoder);
8064
8065         amdgpu_encoder->native_mode.clock = 0;
8066
8067         if (!list_empty(&connector->probed_modes)) {
8068                 struct drm_display_mode *preferred_mode = NULL;
8069
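                     /*
                      * drm_mode_sort() placed preferred modes (largest
                      * first) at the head of the list, so checking the
                      * first entry is enough; the unconditional break
                      * below is intentional.
                      */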
8070                 list_for_each_entry(preferred_mode,
8071                                     &connector->probed_modes,
8072                                     head) {
8073                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8074                                 amdgpu_encoder->native_mode = *preferred_mode;
8075
8076                         break;
8077                 }
8078
8079         }
8080 }
8081
8082 static struct drm_display_mode *
8083 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8084                              char *name,
8085                              int hdisplay, int vdisplay)
8086 {
8087         struct drm_device *dev = encoder->dev;
8088         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8089         struct drm_display_mode *mode = NULL;
8090         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8091
8092         mode = drm_mode_duplicate(dev, native_mode);
8093
8094         if (mode == NULL)
8095                 return NULL;
8096
8097         mode->hdisplay = hdisplay;
8098         mode->vdisplay = vdisplay;
8099         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8100         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8101
8102         return mode;
8104 }
8105
8106 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8107                                                  struct drm_connector *connector)
8108 {
8109         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8110         struct drm_display_mode *mode = NULL;
8111         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8112         struct amdgpu_dm_connector *amdgpu_dm_connector =
8113                                 to_amdgpu_dm_connector(connector);
8114         int i;
8115         int n;
8116         struct mode_size {
8117                 char name[DRM_DISPLAY_MODE_LEN];
8118                 int w;
8119                 int h;
8120         } common_modes[] = {
8121                 {  "640x480",  640,  480},
8122                 {  "800x600",  800,  600},
8123                 { "1024x768", 1024,  768},
8124                 { "1280x720", 1280,  720},
8125                 { "1280x800", 1280,  800},
8126                 {"1280x1024", 1280, 1024},
8127                 { "1440x900", 1440,  900},
8128                 {"1680x1050", 1680, 1050},
8129                 {"1600x1200", 1600, 1200},
8130                 {"1920x1080", 1920, 1080},
8131                 {"1920x1200", 1920, 1200}
8132         };
8133
8134         n = ARRAY_SIZE(common_modes);
8135
8136         for (i = 0; i < n; i++) {
8137                 struct drm_display_mode *curmode = NULL;
8138                 bool mode_existed = false;
8139
8140                 if (common_modes[i].w > native_mode->hdisplay ||
8141                     common_modes[i].h > native_mode->vdisplay ||
8142                    (common_modes[i].w == native_mode->hdisplay &&
8143                     common_modes[i].h == native_mode->vdisplay))
8144                         continue;
8145
8146                 list_for_each_entry(curmode, &connector->probed_modes, head) {
8147                         if (common_modes[i].w == curmode->hdisplay &&
8148                             common_modes[i].h == curmode->vdisplay) {
8149                                 mode_existed = true;
8150                                 break;
8151                         }
8152                 }
8153
8154                 if (mode_existed)
8155                         continue;
8156
8157                 mode = amdgpu_dm_create_common_mode(encoder,
8158                                 common_modes[i].name, common_modes[i].w,
8159                                 common_modes[i].h);
8160                 if (!mode)
8161                         continue;
8162
8163                 drm_mode_probed_add(connector, mode);
8164                 amdgpu_dm_connector->num_modes++;
8165         }
8166 }
8167
8168 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8169 {
8170         struct drm_encoder *encoder;
8171         struct amdgpu_encoder *amdgpu_encoder;
8172         const struct drm_display_mode *native_mode;
8173
8174         if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8175             connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8176                 return;
8177
8178         encoder = amdgpu_dm_connector_to_encoder(connector);
8179         if (!encoder)
8180                 return;
8181
8182         amdgpu_encoder = to_amdgpu_encoder(encoder);
8183
8184         native_mode = &amdgpu_encoder->native_mode;
8185         if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8186                 return;
8187
8188         drm_connector_set_panel_orientation_with_quirk(connector,
8189                                                        DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8190                                                        native_mode->hdisplay,
8191                                                        native_mode->vdisplay);
8192 }
8193
8194 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8195                                               struct edid *edid)
8196 {
8197         struct amdgpu_dm_connector *amdgpu_dm_connector =
8198                         to_amdgpu_dm_connector(connector);
8199
8200         if (edid) {
8201                 /* Start from an empty probed_modes list */
8202                 INIT_LIST_HEAD(&connector->probed_modes);
8203                 amdgpu_dm_connector->num_modes =
8204                                 drm_add_edid_modes(connector, edid);
8205
8206                 /* Sort the probed modes before calling
8207                  * amdgpu_dm_get_native_mode(), since an EDID can
8208                  * contain more than one preferred mode. Modes later
8209                  * in the probed mode list may carry a higher
8210                  * preferred resolution: for example, 3840x2160 in
8211                  * the base EDID preferred timing and 4096x2160 in a
8212                  * DID extension block that follows.
8213                  */
8214                 drm_mode_sort(&connector->probed_modes);
8215                 amdgpu_dm_get_native_mode(connector);
8216
8217                 /* Freesync capabilities are reset by calling
8218                  * drm_add_edid_modes() and need to be
8219                  * restored here.
8220                  */
8221                 amdgpu_dm_update_freesync_caps(connector, edid);
8222
8223                 amdgpu_set_panel_orientation(connector);
8224         } else {
8225                 amdgpu_dm_connector->num_modes = 0;
8226         }
8227 }
8228
8229 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8230                               struct drm_display_mode *mode)
8231 {
8232         struct drm_display_mode *m;
8233
8234         list_for_each_entry(m, &aconnector->base.probed_modes, head) {
8235                 if (drm_mode_equal(m, mode))
8236                         return true;
8237         }
8238
8239         return false;
8240 }
8241
8242 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8243 {
8244         const struct drm_display_mode *m;
8245         struct drm_display_mode *new_mode;
8246         uint i;
8247         uint32_t new_modes_count = 0;
8248
8249         /* Standard FPS values
8250          *
8251          * 23.976       - TV/NTSC
8252          * 24           - Cinema
8253          * 25           - TV/PAL
8254          * 29.97        - TV/NTSC
8255          * 30           - TV/NTSC
8256          * 48           - Cinema HFR
8257          * 50           - TV/PAL
8258          * 60           - Commonly used
8259          * 48,72,96,120 - Multiples of 24
8260          */
8261         static const uint32_t common_rates[] = {
8262                 23976, 24000, 25000, 29970, 30000,
8263                 48000, 50000, 60000, 72000, 96000, 120000
8264         };
8265
8266         /*
8267          * Find mode with highest refresh rate with the same resolution
8268          * as the preferred mode. Some monitors report a preferred mode
8269          * with lower resolution than the highest refresh rate supported.
8270          */
8271
8272         m = get_highest_refresh_rate_mode(aconnector, true);
8273         if (!m)
8274                 return 0;
8275
8276         for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8277                 uint64_t target_vtotal, target_vtotal_diff;
8278                 uint64_t num, den;
8279
8280                 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8281                         continue;
8282
8283                 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8284                     common_rates[i] > aconnector->max_vfreq * 1000)
8285                         continue;
8286
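                     /*
                      * vrefresh [Hz] = clock [kHz] * 1000 / (htotal * vtotal),
                      * so the vtotal that yields common_rates[i] (in mHz) is
                      * clock * 1000 * 1000 / (rate * htotal). E.g. a CEA
                      * 1920x1080@60 mode (clock 148500, htotal 2200, vtotal
                      * 1125) needs vtotal 2812 for a 24 Hz refresh rate.
                      */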
8287                 num = (unsigned long long)m->clock * 1000 * 1000;
8288                 den = common_rates[i] * (unsigned long long)m->htotal;
8289                 target_vtotal = div_u64(num, den);
8290                 target_vtotal_diff = target_vtotal - m->vtotal;
8291
8292                 /* Check for illegal modes */
8293                 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8294                     m->vsync_end + target_vtotal_diff < m->vsync_start ||
8295                     m->vtotal + target_vtotal_diff < m->vsync_end)
8296                         continue;
8297
8298                 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8299                 if (!new_mode)
8300                         goto out;
8301
8302                 new_mode->vtotal += (u16)target_vtotal_diff;
8303                 new_mode->vsync_start += (u16)target_vtotal_diff;
8304                 new_mode->vsync_end += (u16)target_vtotal_diff;
8305                 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8306                 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8307
8308                 if (!is_duplicate_mode(aconnector, new_mode)) {
8309                         drm_mode_probed_add(&aconnector->base, new_mode);
8310                         new_modes_count += 1;
8311                 } else {
8312                         drm_mode_destroy(aconnector->base.dev, new_mode);
8313                 }
8313         }
8314  out:
8315         return new_modes_count;
8316 }
8317
8318 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8319                                                    struct edid *edid)
8320 {
8321         struct amdgpu_dm_connector *amdgpu_dm_connector =
8322                 to_amdgpu_dm_connector(connector);
8323
8324         if (!edid)
8325                 return;
8326
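             /*
              * Only synthesize the fixed-rate modes when the reported
              * freesync range is wider than 10 Hz.
              */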
8327         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8328                 amdgpu_dm_connector->num_modes +=
8329                         add_fs_modes(amdgpu_dm_connector);
8330 }
8331
8332 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8333 {
8334         struct amdgpu_dm_connector *amdgpu_dm_connector =
8335                         to_amdgpu_dm_connector(connector);
8336         struct drm_encoder *encoder;
8337         struct edid *edid = amdgpu_dm_connector->edid;
8338
8339         encoder = amdgpu_dm_connector_to_encoder(connector);
8340
8341         if (!drm_edid_is_valid(edid)) {
8342                 amdgpu_dm_connector->num_modes =
8343                                 drm_add_modes_noedid(connector, 640, 480);
8344         } else {
8345                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8346                 amdgpu_dm_connector_add_common_modes(encoder, connector);
8347                 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8348         }
8349         amdgpu_dm_fbc_init(connector);
8350
8351         return amdgpu_dm_connector->num_modes;
8352 }
8353
8354 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8355                                      struct amdgpu_dm_connector *aconnector,
8356                                      int connector_type,
8357                                      struct dc_link *link,
8358                                      int link_index)
8359 {
8360         struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8361
8362         /*
8363          * Some of the properties below require access to state, like bpc.
8364          * Allocate some default initial connector state with our reset helper.
8365          */
8366         if (aconnector->base.funcs->reset)
8367                 aconnector->base.funcs->reset(&aconnector->base);
8368
8369         aconnector->connector_id = link_index;
8370         aconnector->dc_link = link;
8371         aconnector->base.interlace_allowed = false;
8372         aconnector->base.doublescan_allowed = false;
8373         aconnector->base.stereo_allowed = false;
8374         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8375         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8376         aconnector->audio_inst = -1;
8377         mutex_init(&aconnector->hpd_lock);
8378
8379         /*
8380          * Configure HPD hot plug support. The connector->polled default
8381          * value is 0, which means HPD hot plug is not supported.
8382          */
8383         switch (connector_type) {
8384         case DRM_MODE_CONNECTOR_HDMIA:
8385                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8386                 aconnector->base.ycbcr_420_allowed =
8387                         link->link_enc->features.hdmi_ycbcr420_supported;
8388                 break;
8389         case DRM_MODE_CONNECTOR_DisplayPort:
8390                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8391                 link->link_enc = link_enc_cfg_get_link_enc(link);
8392                 ASSERT(link->link_enc);
8393                 if (link->link_enc)
8394                         aconnector->base.ycbcr_420_allowed =
8395                                 link->link_enc->features.dp_ycbcr420_supported;
8396                 break;
8397         case DRM_MODE_CONNECTOR_DVID:
8398                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8399                 break;
8400         default:
8401                 break;
8402         }
8403
8404         drm_object_attach_property(&aconnector->base.base,
8405                                 dm->ddev->mode_config.scaling_mode_property,
8406                                 DRM_MODE_SCALE_NONE);
8407
8408         drm_object_attach_property(&aconnector->base.base,
8409                                 adev->mode_info.underscan_property,
8410                                 UNDERSCAN_OFF);
8411         drm_object_attach_property(&aconnector->base.base,
8412                                 adev->mode_info.underscan_hborder_property,
8413                                 0);
8414         drm_object_attach_property(&aconnector->base.base,
8415                                 adev->mode_info.underscan_vborder_property,
8416                                 0);
8417
8418         if (!aconnector->mst_port)
8419                 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8420
8421         /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8422         aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8423         aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8424
8425         if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8426             (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8427                 drm_object_attach_property(&aconnector->base.base,
8428                                 adev->mode_info.abm_level_property, 0);
8429         }
8430
8431         if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8432             connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8433             connector_type == DRM_MODE_CONNECTOR_eDP) {
8434                 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8435
8436                 if (!aconnector->mst_port)
8437                         drm_connector_attach_vrr_capable_property(&aconnector->base);
8438
8439 #ifdef CONFIG_DRM_AMD_DC_HDCP
8440                 if (adev->dm.hdcp_workqueue)
8441                         drm_connector_attach_content_protection_property(&aconnector->base, true);
8442 #endif
8443         }
8444 }
8445
8446 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8447                               struct i2c_msg *msgs, int num)
8448 {
8449         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8450         struct ddc_service *ddc_service = i2c->ddc_service;
8451         struct i2c_command cmd;
8452         int i;
8453         int result = -EIO;
8454
8455         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8456
8457         if (!cmd.payloads)
8458                 return result;
8459
8460         cmd.number_of_payloads = num;
8461         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8462         cmd.speed = 100;
8463
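             /* Mirror each i2c_msg into a DC i2c_payload; I2C_M_RD marks a read. */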
8464         for (i = 0; i < num; i++) {
8465                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8466                 cmd.payloads[i].address = msgs[i].addr;
8467                 cmd.payloads[i].length = msgs[i].len;
8468                 cmd.payloads[i].data = msgs[i].buf;
8469         }
8470
8471         if (dc_submit_i2c(
8472                         ddc_service->ctx->dc,
8473                         ddc_service->ddc_pin->hw_info.ddc_channel,
8474                         &cmd))
8475                 result = num;
8476
8477         kfree(cmd.payloads);
8478         return result;
8479 }
8480
8481 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8482 {
8483         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8484 }
8485
8486 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8487         .master_xfer = amdgpu_dm_i2c_xfer,
8488         .functionality = amdgpu_dm_i2c_func,
8489 };
8490
8491 static struct amdgpu_i2c_adapter *
8492 create_i2c(struct ddc_service *ddc_service,
8493            int link_index,
8494            int *res)
8495 {
8496         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8497         struct amdgpu_i2c_adapter *i2c;
8498
8499         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8500         if (!i2c)
8501                 return NULL;
8502         i2c->base.owner = THIS_MODULE;
8503         i2c->base.class = I2C_CLASS_DDC;
8504         i2c->base.dev.parent = &adev->pdev->dev;
8505         i2c->base.algo = &amdgpu_dm_i2c_algo;
8506         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8507         i2c_set_adapdata(&i2c->base, i2c);
8508         i2c->ddc_service = ddc_service;
8509         if (i2c->ddc_service->ddc_pin)
8510                 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8511
8512         return i2c;
8513 }
8514
8515
8516 /*
8517  * Note: this function assumes that dc_link_detect() was called for the
8518  * dc_link which will be represented by this aconnector.
8519  */
8520 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8521                                     struct amdgpu_dm_connector *aconnector,
8522                                     uint32_t link_index,
8523                                     struct amdgpu_encoder *aencoder)
8524 {
8525         int res = 0;
8526         int connector_type;
8527         struct dc *dc = dm->dc;
8528         struct dc_link *link = dc_get_link_at_index(dc, link_index);
8529         struct amdgpu_i2c_adapter *i2c;
8530
8531         link->priv = aconnector;
8532
8533         DRM_DEBUG_DRIVER("%s()\n", __func__);
8534
8535         i2c = create_i2c(link->ddc, link->link_index, &res);
8536         if (!i2c) {
8537                 DRM_ERROR("Failed to create i2c adapter data\n");
8538                 return -ENOMEM;
8539         }
8540
8541         aconnector->i2c = i2c;
8542         res = i2c_add_adapter(&i2c->base);
8543
8544         if (res) {
8545                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8546                 goto out_free;
8547         }
8548
8549         connector_type = to_drm_connector_type(link->connector_signal);
8550
8551         res = drm_connector_init_with_ddc(
8552                         dm->ddev,
8553                         &aconnector->base,
8554                         &amdgpu_dm_connector_funcs,
8555                         connector_type,
8556                         &i2c->base);
8557
8558         if (res) {
8559                 DRM_ERROR("connector_init failed\n");
8560                 aconnector->connector_id = -1;
8561                 goto out_free;
8562         }
8563
8564         drm_connector_helper_add(
8565                         &aconnector->base,
8566                         &amdgpu_dm_connector_helper_funcs);
8567
8568         amdgpu_dm_connector_init_helper(
8569                 dm,
8570                 aconnector,
8571                 connector_type,
8572                 link,
8573                 link_index);
8574
8575         drm_connector_attach_encoder(
8576                 &aconnector->base, &aencoder->base);
8577
8578         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8579             connector_type == DRM_MODE_CONNECTOR_eDP)
8580                 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8581
8582 out_free:
8583         if (res) {
8584                 kfree(i2c);
8585                 aconnector->i2c = NULL;
8586         }
8587         return res;
8588 }
8589
8590 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
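     /*
      * One bit per usable CRTC, i.e. (1 << num_crtc) - 1, capped at the
      * maximum of 6 CRTCs the display hardware exposes.
      */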
8591 {
8592         switch (adev->mode_info.num_crtc) {
8593         case 1:
8594                 return 0x1;
8595         case 2:
8596                 return 0x3;
8597         case 3:
8598                 return 0x7;
8599         case 4:
8600                 return 0xf;
8601         case 5:
8602                 return 0x1f;
8603         case 6:
8604         default:
8605                 return 0x3f;
8606         }
8607 }
8608
8609 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8610                                   struct amdgpu_encoder *aencoder,
8611                                   uint32_t link_index)
8612 {
8613         struct amdgpu_device *adev = drm_to_adev(dev);
8614
8615         int res = drm_encoder_init(dev,
8616                                    &aencoder->base,
8617                                    &amdgpu_dm_encoder_funcs,
8618                                    DRM_MODE_ENCODER_TMDS,
8619                                    NULL);
8620
8621         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8622
8623         if (!res)
8624                 aencoder->encoder_id = link_index;
8625         else
8626                 aencoder->encoder_id = -1;
8627
8628         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8629
8630         return res;
8631 }
8632
8633 static void manage_dm_interrupts(struct amdgpu_device *adev,
8634                                  struct amdgpu_crtc *acrtc,
8635                                  bool enable)
8636 {
8637         /*
8638          * We have no guarantee that the frontend index maps to the same
8639          * backend index - some even map to more than one.
8640          *
8641          * TODO: Use a different interrupt or check DC itself for the mapping.
8642          */
8643         int irq_type =
8644                 amdgpu_display_crtc_idx_to_irq_type(
8645                         adev,
8646                         acrtc->crtc_id);
8647
8648         if (enable) {
8649                 drm_crtc_vblank_on(&acrtc->base);
8650                 amdgpu_irq_get(
8651                         adev,
8652                         &adev->pageflip_irq,
8653                         irq_type);
8654 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8655                 amdgpu_irq_get(
8656                         adev,
8657                         &adev->vline0_irq,
8658                         irq_type);
8659 #endif
8660         } else {
8661 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8662                 amdgpu_irq_put(
8663                         adev,
8664                         &adev->vline0_irq,
8665                         irq_type);
8666 #endif
8667                 amdgpu_irq_put(
8668                         adev,
8669                         &adev->pageflip_irq,
8670                         irq_type);
8671                 drm_crtc_vblank_off(&acrtc->base);
8672         }
8673 }
8674
8675 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8676                                       struct amdgpu_crtc *acrtc)
8677 {
8678         int irq_type =
8679                 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8680
8681         /*
8682          * This reads the current state for the IRQ and forcibly reapplies
8683          * the setting to hardware.
8684          */
8685         amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8686 }
8687
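     /*
      * Report whether the scaling mode or underscan settings differ between
      * the two connector states; enabling or disabling underscan only counts
      * when non-zero borders are involved.
      */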
8688 static bool
8689 is_scaling_state_different(const struct dm_connector_state *dm_state,
8690                            const struct dm_connector_state *old_dm_state)
8691 {
8692         if (dm_state->scaling != old_dm_state->scaling)
8693                 return true;
8694         if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8695                 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8696                         return true;
8697         } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8698                 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8699                         return true;
8700         } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8701                    dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8702                 return true;
8703         return false;
8704 }
8705
8706 #ifdef CONFIG_DRM_AMD_DC_HDCP
8707 static bool is_content_protection_different(struct drm_connector_state *state,
8708                                             const struct drm_connector_state *old_state,
8709                                             const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8710 {
8711         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8712         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8713
8714         /* Handle: Type0/1 change */
8715         if (old_state->hdcp_content_type != state->hdcp_content_type &&
8716             state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8717                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8718                 return true;
8719         }
8720
8721         /* CP is being re-enabled, ignore this
8722          *
8723          * Handles:     ENABLED -> DESIRED
8724          */
8725         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8726             state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8727                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8728                 return false;
8729         }
8730
8731         /* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8732          *
8733          * Handles:     UNDESIRED -> ENABLED
8734          */
8735         if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8736             state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8737                 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8738
8739         /* Stream removed and re-enabled
8740          *
8741          * Can sometimes overlap with the HPD case,
8742          * thus set update_hdcp to false to avoid
8743          * setting HDCP multiple times.
8744          *
8745          * Handles:     DESIRED -> DESIRED (Special case)
8746          */
8747         if (!(old_state->crtc && old_state->crtc->enabled) &&
8748                 state->crtc && state->crtc->enabled &&
8749                 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8750                 dm_con_state->update_hdcp = false;
8751                 return true;
8752         }
8753
8754         /* Hot-plug, headless s3, dpms
8755          *
8756          * Only start HDCP if the display is connected/enabled.
8757          * update_hdcp flag will be set to false until the next
8758          * HPD comes in.
8759          *
8760          * Handles:     DESIRED -> DESIRED (Special case)
8761          */
8762         if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8763             connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8764                 dm_con_state->update_hdcp = false;
8765                 return true;
8766         }
8767
8768         /*
8769          * Handles:     UNDESIRED -> UNDESIRED
8770          *              DESIRED -> DESIRED
8771          *              ENABLED -> ENABLED
8772          */
8773         if (old_state->content_protection == state->content_protection)
8774                 return false;
8775
8776         /*
8777          * Handles:     UNDESIRED -> DESIRED
8778          *              DESIRED -> UNDESIRED
8779          *              ENABLED -> UNDESIRED
8780          */
8781         if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8782                 return true;
8783
8784         /*
8785          * Handles:     DESIRED -> ENABLED
8786          */
8787         return false;
8788 }
8789
8790 #endif
8791 static void remove_stream(struct amdgpu_device *adev,
8792                           struct amdgpu_crtc *acrtc,
8793                           struct dc_stream_state *stream)
8794 {
8795         /* This is the update mode case. */
8796
8797         acrtc->otg_inst = -1;
8798         acrtc->enabled = false;
8799 }
8800
8801 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8802                                struct dc_cursor_position *position)
8803 {
8804         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8805         int x, y;
8806         int xorigin = 0, yorigin = 0;
8807
8808         if (!crtc || !plane->state->fb)
8809                 return 0;
8810
8811         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8812             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8813                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8814                           __func__,
8815                           plane->state->crtc_w,
8816                           plane->state->crtc_h);
8817                 return -EINVAL;
8818         }
8819
8820         x = plane->state->crtc_x;
8821         y = plane->state->crtc_y;
8822
8823         if (x <= -amdgpu_crtc->max_cursor_width ||
8824             y <= -amdgpu_crtc->max_cursor_height)
8825                 return 0;
8826
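             /*
              * A cursor hanging off the top/left edge is programmed by
              * clamping x/y to zero and folding the overhang into the
              * hotspot, so the visible portion keeps its position.
              */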
8827         if (x < 0) {
8828                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8829                 x = 0;
8830         }
8831         if (y < 0) {
8832                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8833                 y = 0;
8834         }
8835         position->enable = true;
8836         position->translate_by_source = true;
8837         position->x = x;
8838         position->y = y;
8839         position->x_hotspot = xorigin;
8840         position->y_hotspot = yorigin;
8841
8842         return 0;
8843 }
8844
8845 static void handle_cursor_update(struct drm_plane *plane,
8846                                  struct drm_plane_state *old_plane_state)
8847 {
8848         struct amdgpu_device *adev = drm_to_adev(plane->dev);
8849         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8850         struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8851         struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8852         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8853         uint64_t address = afb ? afb->address : 0;
8854         struct dc_cursor_position position = {0};
8855         struct dc_cursor_attributes attributes;
8856         int ret;
8857
8858         if (!plane->state->fb && !old_plane_state->fb)
8859                 return;
8860
8861         DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8862                       __func__,
8863                       amdgpu_crtc->crtc_id,
8864                       plane->state->crtc_w,
8865                       plane->state->crtc_h);
8866
8867         ret = get_cursor_position(plane, crtc, &position);
8868         if (ret)
8869                 return;
8870
8871         if (!position.enable) {
8872                 /* turn off cursor */
8873                 if (crtc_state && crtc_state->stream) {
8874                         mutex_lock(&adev->dm.dc_lock);
8875                         dc_stream_set_cursor_position(crtc_state->stream,
8876                                                       &position);
8877                         mutex_unlock(&adev->dm.dc_lock);
8878                 }
8879                 return;
8880         }
8881
8882         amdgpu_crtc->cursor_width = plane->state->crtc_w;
8883         amdgpu_crtc->cursor_height = plane->state->crtc_h;
8884
8885         memset(&attributes, 0, sizeof(attributes));
8886         attributes.address.high_part = upper_32_bits(address);
8887         attributes.address.low_part  = lower_32_bits(address);
8888         attributes.width             = plane->state->crtc_w;
8889         attributes.height            = plane->state->crtc_h;
8890         attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8891         attributes.rotation_angle    = 0;
8892         attributes.attribute_flags.value = 0;
8893
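             /* DC wants the pitch in pixels, while DRM stores it in bytes. */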
8894         attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8895
8896         if (crtc_state->stream) {
8897                 mutex_lock(&adev->dm.dc_lock);
8898                 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8899                                                          &attributes))
8900                         DRM_ERROR("DC failed to set cursor attributes\n");
8901
8902                 if (!dc_stream_set_cursor_position(crtc_state->stream,
8903                                                    &position))
8904                         DRM_ERROR("DC failed to set cursor position\n");
8905                 mutex_unlock(&adev->dm.dc_lock);
8906         }
8907 }
8908
8909 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8910 {
8912         assert_spin_locked(&acrtc->base.dev->event_lock);
8913         WARN_ON(acrtc->event);
8914
8915         acrtc->event = acrtc->base.state->event;
8916
8917         /* Set the flip status */
8918         acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8919
8920         /* Mark this event as consumed */
8921         acrtc->base.state->event = NULL;
8922
8923         DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8924                      acrtc->crtc_id);
8925 }
8926
8927 static void update_freesync_state_on_stream(
8928         struct amdgpu_display_manager *dm,
8929         struct dm_crtc_state *new_crtc_state,
8930         struct dc_stream_state *new_stream,
8931         struct dc_plane_state *surface,
8932         u32 flip_timestamp_in_us)
8933 {
8934         struct mod_vrr_params vrr_params;
8935         struct dc_info_packet vrr_infopacket = {0};
8936         struct amdgpu_device *adev = dm->adev;
8937         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8938         unsigned long flags;
8939         bool pack_sdp_v1_3 = false;
8940
8941         if (!new_stream)
8942                 return;
8943
8944         /*
8945          * TODO: Determine why min/max totals and vrefresh can be 0 here.
8946          * For now it's sufficient to just guard against these conditions.
8947          */
8948
8949         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8950                 return;
8951
8952         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8953         vrr_params = acrtc->dm_irq_params.vrr_params;
8954
8955         if (surface) {
8956                 mod_freesync_handle_preflip(
8957                         dm->freesync_module,
8958                         surface,
8959                         new_stream,
8960                         flip_timestamp_in_us,
8961                         &vrr_params);
8962
8963                 if (adev->family < AMDGPU_FAMILY_AI &&
8964                     amdgpu_dm_vrr_active(new_crtc_state)) {
8965                         mod_freesync_handle_v_update(dm->freesync_module,
8966                                                      new_stream, &vrr_params);
8967
8968                         /* Need to call this before the frame ends. */
8969                         dc_stream_adjust_vmin_vmax(dm->dc,
8970                                                    new_crtc_state->stream,
8971                                                    &vrr_params.adjust);
8972                 }
8973         }
8974
8975         mod_freesync_build_vrr_infopacket(
8976                 dm->freesync_module,
8977                 new_stream,
8978                 &vrr_params,
8979                 PACKET_TYPE_VRR,
8980                 TRANSFER_FUNC_UNKNOWN,
8981                 &vrr_infopacket,
8982                 pack_sdp_v1_3);
8983
8984         new_crtc_state->freesync_timing_changed |=
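             /*
              * Record whether the timing adjustment or the VRR infopacket
              * differ from the copies cached on the acrtc and crtc state
              * before those caches are refreshed below.
              */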
8985                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8986                         &vrr_params.adjust,
8987                         sizeof(vrr_params.adjust)) != 0);
8988
8989         new_crtc_state->freesync_vrr_info_changed |=
8990                 (memcmp(&new_crtc_state->vrr_infopacket,
8991                         &vrr_infopacket,
8992                         sizeof(vrr_infopacket)) != 0);
8993
8994         acrtc->dm_irq_params.vrr_params = vrr_params;
8995         new_crtc_state->vrr_infopacket = vrr_infopacket;
8996
8997         new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8998         new_stream->vrr_infopacket = vrr_infopacket;
8999
9000         if (new_crtc_state->freesync_vrr_info_changed)
9001                 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d\n",
9002                               new_crtc_state->base.crtc->base.id,
9003                               (int)new_crtc_state->base.vrr_enabled,
9004                               (int)vrr_params.state);
9005
9006         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9007 }
9008
9009 static void update_stream_irq_parameters(
9010         struct amdgpu_display_manager *dm,
9011         struct dm_crtc_state *new_crtc_state)
9012 {
9013         struct dc_stream_state *new_stream = new_crtc_state->stream;
9014         struct mod_vrr_params vrr_params;
9015         struct mod_freesync_config config = new_crtc_state->freesync_config;
9016         struct amdgpu_device *adev = dm->adev;
9017         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
9018         unsigned long flags;
9019
9020         if (!new_stream)
9021                 return;
9022
9023         /*
9024          * TODO: Determine why min/max totals and vrefresh can be 0 here.
9025          * For now it's sufficient to just guard against these conditions.
9026          */
9027         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
9028                 return;
9029
9030         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9031         vrr_params = acrtc->dm_irq_params.vrr_params;
9032
9033         if (new_crtc_state->vrr_supported &&
9034             config.min_refresh_in_uhz &&
9035             config.max_refresh_in_uhz) {
9036                 /*
9037                  * if freesync compatible mode was set, config.state will be set
9038                  * in atomic check
9039                  */
9040                 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
9041                     (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
9042                      new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
9043                         vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
9044                         vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
9045                         vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
9046                         vrr_params.state = VRR_STATE_ACTIVE_FIXED;
9047                 } else {
9048                         config.state = new_crtc_state->base.vrr_enabled ?
9049                                                      VRR_STATE_ACTIVE_VARIABLE :
9050                                                      VRR_STATE_INACTIVE;
9051                 }
9052         } else {
9053                 config.state = VRR_STATE_UNSUPPORTED;
9054         }
9055
9056         mod_freesync_build_vrr_params(dm->freesync_module,
9057                                       new_stream,
9058                                       &config, &vrr_params);
9059
9060         new_crtc_state->freesync_timing_changed |=
9061                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9062                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9063
9064         new_crtc_state->freesync_config = config;
9065         /* Copy state for access from DM IRQ handler */
9066         acrtc->dm_irq_params.freesync_config = config;
9067         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9068         acrtc->dm_irq_params.vrr_params = vrr_params;
9069         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9070 }
9071
9072 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9073                                             struct dm_crtc_state *new_state)
9074 {
9075         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9076         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9077
9078         if (!old_vrr_active && new_vrr_active) {
9079                 /* Transition VRR inactive -> active:
9080                  * While VRR is active, we must not disable vblank irq, as a
9081                  * re-enable after a disable would compute bogus vblank/pflip
9082                  * timestamps if it happened inside the display front porch.
9083                  *
9084                  * We also need vupdate irq for the actual core vblank handling
9085                  * at end of vblank.
9086                  */
9087                 dm_set_vupdate_irq(new_state->base.crtc, true);
9088                 drm_crtc_vblank_get(new_state->base.crtc);
9089                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9090                                  __func__, new_state->base.crtc->base.id);
9091         } else if (old_vrr_active && !new_vrr_active) {
9092                 /* Transition VRR active -> inactive:
9093                  * Allow vblank irq disable again for fixed refresh rate.
9094                  */
9095                 dm_set_vupdate_irq(new_state->base.crtc, false);
9096                 drm_crtc_vblank_put(new_state->base.crtc);
9097                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9098                                  __func__, new_state->base.crtc->base.id);
9099         }
9100 }
9101
9102 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9103 {
9104         struct drm_plane *plane;
9105         struct drm_plane_state *old_plane_state;
9106         int i;
9107
9108         /*
9109          * TODO: Make this per-stream so we don't issue redundant updates for
9110          * commits with multiple streams.
9111          */
9112         for_each_old_plane_in_state(state, plane, old_plane_state, i)
9113                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9114                         handle_cursor_update(plane, old_plane_state);
9115 }
9116
9117 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9118                                     struct dc_state *dc_state,
9119                                     struct drm_device *dev,
9120                                     struct amdgpu_display_manager *dm,
9121                                     struct drm_crtc *pcrtc,
9122                                     bool wait_for_vblank)
9123 {
9124         uint32_t i;
9125         uint64_t timestamp_ns;
9126         struct drm_plane *plane;
9127         struct drm_plane_state *old_plane_state, *new_plane_state;
9128         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9129         struct drm_crtc_state *new_pcrtc_state =
9130                         drm_atomic_get_new_crtc_state(state, pcrtc);
9131         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9132         struct dm_crtc_state *dm_old_crtc_state =
9133                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9134         int planes_count = 0, vpos, hpos;
9135         long r;
9136         unsigned long flags;
9137         struct amdgpu_bo *abo;
9138         uint32_t target_vblank, last_flip_vblank;
9139         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9140         bool pflip_present = false;
9141         struct {
9142                 struct dc_surface_update surface_updates[MAX_SURFACES];
9143                 struct dc_plane_info plane_infos[MAX_SURFACES];
9144                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9145                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9146                 struct dc_stream_update stream_update;
9147         } *bundle;
9148
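             /* The update bundle is too large for the stack, so allocate it from the heap. */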
9149         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9150
9151         if (!bundle) {
9152                 dm_error("Failed to allocate update bundle\n");
9153                 goto cleanup;
9154         }
9155
9156         /*
9157          * Disable the cursor first if we're disabling all the planes.
9158          * It'll remain on the screen after the planes are re-enabled
9159          * if we don't.
9160          */
9161         if (acrtc_state->active_planes == 0)
9162                 amdgpu_dm_commit_cursors(state);
9163
9164         /* update planes when needed */
9165         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9166                 struct drm_crtc *crtc = new_plane_state->crtc;
9167                 struct drm_crtc_state *new_crtc_state;
9168                 struct drm_framebuffer *fb = new_plane_state->fb;
9169                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9170                 bool plane_needs_flip;
9171                 struct dc_plane_state *dc_plane;
9172                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9173
9174                 /* Cursor plane is handled after stream updates */
9175                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9176                         continue;
9177
9178                 if (!fb || !crtc || pcrtc != crtc)
9179                         continue;
9180
9181                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9182                 if (!new_crtc_state->active)
9183                         continue;
9184
9185                 dc_plane = dm_new_plane_state->dc_state;
9186
9187                 bundle->surface_updates[planes_count].surface = dc_plane;
9188                 if (new_pcrtc_state->color_mgmt_changed) {
9189                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9190                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9191                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9192                 }
9193
9194                 fill_dc_scaling_info(dm->adev, new_plane_state,
9195                                      &bundle->scaling_infos[planes_count]);
9196
9197                 bundle->surface_updates[planes_count].scaling_info =
9198                         &bundle->scaling_infos[planes_count];
9199
9200                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9201
9202                 pflip_present = pflip_present || plane_needs_flip;
9203
9204                 if (!plane_needs_flip) {
9205                         planes_count += 1;
9206                         continue;
9207                 }
9208
9209                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9210
9211                 /*
9212                  * Wait for all fences on this FB. Do a limited wait to avoid
9213                  * deadlock during GPU reset, when this fence will not signal
9214                  * but we still hold the reservation lock for the BO.
9215                  */
9216                 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9217                                           DMA_RESV_USAGE_WRITE, false,
9218                                           msecs_to_jiffies(5000));
9219                 if (unlikely(r <= 0))
9220                         DRM_ERROR("Waiting for fences timed out!\n");
9221
9222                 fill_dc_plane_info_and_addr(
9223                         dm->adev, new_plane_state,
9224                         afb->tiling_flags,
9225                         &bundle->plane_infos[planes_count],
9226                         &bundle->flip_addrs[planes_count].address,
9227                         afb->tmz_surface, false);
9228
9229                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9230                                  new_plane_state->plane->index,
9231                                  bundle->plane_infos[planes_count].dcc.enable);
9232
9233                 bundle->surface_updates[planes_count].plane_info =
9234                         &bundle->plane_infos[planes_count];
9235
9236                 /*
9237                  * Only allow immediate flips for fast updates that don't
9238                  * change FB pitch, DCC state, rotation or mirroring.
9239                  */
9240                 bundle->flip_addrs[planes_count].flip_immediate =
9241                         crtc->state->async_flip &&
9242                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9243
9244                 timestamp_ns = ktime_get_ns();
9245                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9246                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9247                 bundle->surface_updates[planes_count].surface = dc_plane;
9248
9249                 if (!bundle->surface_updates[planes_count].surface) {
9250                         DRM_ERROR("No surface for CRTC: id=%d\n",
9251                                         acrtc_attach->crtc_id);
9252                         continue;
9253                 }
9254
9255                 if (plane == pcrtc->primary)
9256                         update_freesync_state_on_stream(
9257                                 dm,
9258                                 acrtc_state,
9259                                 acrtc_state->stream,
9260                                 dc_plane,
9261                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9262
9263                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9264                                  __func__,
9265                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9266                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9267
9268                 planes_count += 1;
9269
9270         }
9271
9272         if (pflip_present) {
9273                 if (!vrr_active) {
9274                         /* Use old throttling in non-vrr fixed refresh rate mode
9275                          * to keep flip scheduling based on target vblank counts
9276                          * working in a backwards compatible way, e.g., for
9277                          * clients using the GLX_OML_sync_control extension or
9278                          * DRI3/Present extension with defined target_msc.
9279                          */
9280                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9281                 } else {
9283                         /* For variable refresh rate mode only:
9284                          * Get vblank of last completed flip to avoid > 1 vrr
9285                          * flips per video frame by use of throttling, but allow
9286                          * flip programming anywhere in the possibly large
9287                          * variable vrr vblank interval for fine-grained flip
9288                          * timing control and more opportunity to avoid stutter
9289                          * on late submission of flips.
9290                          */
9291                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9292                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9293                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9294                 }
9295
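                     /*
                      * wait_for_vblank is a bool: when set, target the vblank
                      * after the last completed flip; async flips may target
                      * the same vblank count as the last flip.
                      */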
9296                 target_vblank = last_flip_vblank + wait_for_vblank;
9297
9298                 /*
9299                  * Wait until we're out of the vertical blank period before the one
9300                  * targeted by the flip
9301                  */
9302                 while ((acrtc_attach->enabled &&
9303                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9304                                                             0, &vpos, &hpos, NULL,
9305                                                             NULL, &pcrtc->hwmode)
9306                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9307                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9308                         (int)(target_vblank -
9309                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9310                         usleep_range(1000, 1100);
9311                 }
9312
9313                 /*
9314                  * Prepare the flip event for the pageflip interrupt to handle.
9315                  *
9316                  * This only works in the case where we've already turned on the
9317                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
9318                  * from 0 -> n planes we have to skip a hardware generated event
9319                  * and rely on sending it from software.
9320                  */
9321                 if (acrtc_attach->base.state->event &&
9322                     acrtc_state->active_planes > 0 &&
9323                     !acrtc_state->force_dpms_off) {
9324                         drm_crtc_vblank_get(pcrtc);
9325
9326                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9327
9328                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9329                         prepare_flip_isr(acrtc_attach);
9330
9331                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9332                 }
9333
9334                 if (acrtc_state->stream) {
9335                         if (acrtc_state->freesync_vrr_info_changed)
9336                                 bundle->stream_update.vrr_infopacket =
9337                                         &acrtc_state->stream->vrr_infopacket;
9338                 }
9339         }
9340
9341         /* Update the planes if changed or disable if we don't have any. */
9342         if ((planes_count || acrtc_state->active_planes == 0) &&
9343                 acrtc_state->stream) {
9344                 /*
9345                  * If PSR or idle optimizations are enabled then flush out
9346                  * any pending work before hardware programming.
9347                  */
9348                 if (dm->vblank_control_workqueue)
9349                         flush_workqueue(dm->vblank_control_workqueue);
9350
9351                 bundle->stream_update.stream = acrtc_state->stream;
9352                 if (new_pcrtc_state->mode_changed) {
9353                         bundle->stream_update.src = acrtc_state->stream->src;
9354                         bundle->stream_update.dst = acrtc_state->stream->dst;
9355                 }
9356
9357                 if (new_pcrtc_state->color_mgmt_changed) {
9358                         /*
9359                          * TODO: This isn't fully correct since we've actually
9360                          * already modified the stream in place.
9361                          */
9362                         bundle->stream_update.gamut_remap =
9363                                 &acrtc_state->stream->gamut_remap_matrix;
9364                         bundle->stream_update.output_csc_transform =
9365                                 &acrtc_state->stream->csc_color_matrix;
9366                         bundle->stream_update.out_transfer_func =
9367                                 acrtc_state->stream->out_transfer_func;
9368                 }
9369
9370                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9371                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9372                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9373
9374                 /*
9375                  * If FreeSync state on the stream has changed then we need to
9376                  * re-adjust the min/max bounds now that DC doesn't handle this
9377                  * as part of commit.
9378                  */
9379                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9380                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9381                         dc_stream_adjust_vmin_vmax(
9382                                 dm->dc, acrtc_state->stream,
9383                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9384                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9385                 }
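                     /* Everything below touches shared DC state; serialize it with dc_lock. */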
9386                 mutex_lock(&dm->dc_lock);
9387                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9388                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9389                         amdgpu_dm_psr_disable(acrtc_state->stream);
9390
9391                 dc_commit_updates_for_stream(dm->dc,
9392                                                      bundle->surface_updates,
9393                                                      planes_count,
9394                                                      acrtc_state->stream,
9395                                                      &bundle->stream_update,
9396                                                      dc_state);
9397
9398                 /*
9399                  * Enable or disable the interrupts on the backend.
9400                  *
9401                  * Most pipes are put into power gating when unused.
9402                  *
9403                  * When a pipe is power gated we lose its interrupt
9404                  * enablement state; it is not restored on ungate.
9405                  *
9406                  * So we need to update the IRQ control state in hardware
9407                  * whenever the pipe turns on (since it could have been
9408                  * power gated) or off (since some pipes can't be power
9409                  * gated on some ASICs).
9410                  */
9411                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9412                         dm_update_pflip_irq_state(drm_to_adev(dev),
9413                                                   acrtc_attach);
9414
9415                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9416                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9417                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9418                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9419
9420                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9421                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9422                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9423                         struct amdgpu_dm_connector *aconn =
9424                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9425
9426                         if (aconn->psr_skip_count > 0)
9427                                 aconn->psr_skip_count--;
9428
9429                         /* Allow PSR when skip count is 0. */
9430                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9431                 } else {
9432                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9433                 }
9434
9435                 mutex_unlock(&dm->dc_lock);
9436         }
9437
9438         /*
9439          * Update cursor state *after* programming all the planes.
9440          * This avoids redundant programming in the case where we're going
9441          * to be disabling a single plane - those pipes are being disabled.
9442          */
9443         if (acrtc_state->active_planes)
9444                 amdgpu_dm_commit_cursors(state);
9445
9446 cleanup:
9447         kfree(bundle);
9448 }
9449
9450 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9451                                    struct drm_atomic_state *state)
9452 {
9453         struct amdgpu_device *adev = drm_to_adev(dev);
9454         struct amdgpu_dm_connector *aconnector;
9455         struct drm_connector *connector;
9456         struct drm_connector_state *old_con_state, *new_con_state;
9457         struct drm_crtc_state *new_crtc_state;
9458         struct dm_crtc_state *new_dm_crtc_state;
9459         const struct dc_stream_status *status;
9460         int i, inst;
9461
9462         /* Notify audio device removals. */
9463         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9464                 if (old_con_state->crtc != new_con_state->crtc) {
9465                         /* CRTC changes require notification. */
9466                         goto notify;
9467                 }
9468
9469                 if (!new_con_state->crtc)
9470                         continue;
9471
9472                 new_crtc_state = drm_atomic_get_new_crtc_state(
9473                         state, new_con_state->crtc);
9474
9475                 if (!new_crtc_state)
9476                         continue;
9477
9478                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9479                         continue;
9480
9481         notify:
9482                 aconnector = to_amdgpu_dm_connector(connector);
9483
9484                 mutex_lock(&adev->dm.audio_lock);
9485                 inst = aconnector->audio_inst;
9486                 aconnector->audio_inst = -1;
9487                 mutex_unlock(&adev->dm.audio_lock);
9488
9489                 amdgpu_dm_audio_eld_notify(adev, inst);
9490         }
9491
9492         /* Notify audio device additions. */
9493         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9494                 if (!new_con_state->crtc)
9495                         continue;
9496
9497                 new_crtc_state = drm_atomic_get_new_crtc_state(
9498                         state, new_con_state->crtc);
9499
9500                 if (!new_crtc_state)
9501                         continue;
9502
9503                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9504                         continue;
9505
9506                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9507                 if (!new_dm_crtc_state->stream)
9508                         continue;
9509
9510                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9511                 if (!status)
9512                         continue;
9513
9514                 aconnector = to_amdgpu_dm_connector(connector);
9515
9516                 mutex_lock(&adev->dm.audio_lock);
9517                 inst = status->audio_inst;
9518                 aconnector->audio_inst = inst;
9519                 mutex_unlock(&adev->dm.audio_lock);
9520
9521                 amdgpu_dm_audio_eld_notify(adev, inst);
9522         }
9523 }
9524
9525 /**
9526  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9527  * @crtc_state: the DRM CRTC state
9528  * @stream_state: the DC stream state.
9529  *
9530  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9531  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9532  */
9533 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9534                                                 struct dc_stream_state *stream_state)
9535 {
9536         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9537 }
9538
9539 /**
9540  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
9541  * @state: The atomic state to commit
9542  *
9543  * This will tell DC to commit the constructed DC state from atomic_check,
9544  * programming the hardware. Any failures here imply a hardware failure, since
9545  * atomic check should have filtered anything non-kosher.
9546  */
9547 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9548 {
9549         struct drm_device *dev = state->dev;
9550         struct amdgpu_device *adev = drm_to_adev(dev);
9551         struct amdgpu_display_manager *dm = &adev->dm;
9552         struct dm_atomic_state *dm_state;
9553         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9554         uint32_t i, j;
9555         struct drm_crtc *crtc;
9556         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9557         unsigned long flags;
9558         bool wait_for_vblank = true;
9559         struct drm_connector *connector;
9560         struct drm_connector_state *old_con_state, *new_con_state;
9561         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9562         int crtc_disable_count = 0;
9563         bool mode_set_reset_required = false;
9564
9565         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9566
9567         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9568
9569         dm_state = dm_atomic_get_new_state(state);
9570         if (dm_state && dm_state->context) {
9571                 dc_state = dm_state->context;
9572         } else {
9573                 /* No state changes, retain current state. */
9574                 dc_state_temp = dc_create_state(dm->dc);
9575                 ASSERT(dc_state_temp);
9576                 dc_state = dc_state_temp;
9577                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9578         }
9579
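             /*
              * Disable interrupts and drop the stream reference held for the
              * IRQ parameters on any CRTC that is being disabled or fully
              * modeset, before the new state is programmed.
              */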
9580         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9581                                        new_crtc_state, i) {
9582                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9583
9584                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9585
9586                 if (old_crtc_state->active &&
9587                     (!new_crtc_state->active ||
9588                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9589                         manage_dm_interrupts(adev, acrtc, false);
9590                         dc_stream_release(dm_old_crtc_state->stream);
9591                 }
9592         }
9593
9594         drm_atomic_helper_calc_timestamping_constants(state);
9595
9596         /* update changed items */
9597         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9598                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9599
9600                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9601                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9602
9603                 drm_dbg_state(state->dev,
9604                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9605                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9606                         "connectors_changed:%d\n",
9607                         acrtc->crtc_id,
9608                         new_crtc_state->enable,
9609                         new_crtc_state->active,
9610                         new_crtc_state->planes_changed,
9611                         new_crtc_state->mode_changed,
9612                         new_crtc_state->active_changed,
9613                         new_crtc_state->connectors_changed);
9614
9615                 /* Disable cursor if disabling crtc */
9616                 if (old_crtc_state->active && !new_crtc_state->active) {
9617                         struct dc_cursor_position position;
9618
9619                         memset(&position, 0, sizeof(position));
9620                         mutex_lock(&dm->dc_lock);
9621                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9622                         mutex_unlock(&dm->dc_lock);
9623                 }
9624
9625                 /* Copy all transient state flags into dc state */
9626                 if (dm_new_crtc_state->stream) {
9627                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9628                                                             dm_new_crtc_state->stream);
9629                 }
9630
9631                 /* Handle the headless hotplug case, updating new_state
9632                  * and aconnector as needed.
9633                  */
9634
9635                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9636
9637                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9638
9639                         if (!dm_new_crtc_state->stream) {
9640                                 /*
9641                                  * This could happen because of issues with
9642                                  * userspace notification delivery: userspace
9643                                  * tries to set a mode on a display which is
9644                                  * in fact disconnected, so the aconnector's
9645                                  * dc_sink is NULL. We expect a mode reset to
9646                                  * come soon.
9647                                  *
9648                                  * This can also happen when an unplug occurs
9649                                  * while the resume sequence is still running.
9650                                  *
9651                                  * In both cases we want to pretend we still
9652                                  * have a sink, to keep the pipe running so
9653                                  * that hw state stays consistent with sw state.
9654                                  */
9655                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9656                                                 __func__, acrtc->base.base.id);
9657                                 continue;
9658                         }
9659
9660                         if (dm_old_crtc_state->stream)
9661                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9662
9663                         pm_runtime_get_noresume(dev->dev);
9664
9665                         acrtc->enabled = true;
9666                         acrtc->hw_mode = new_crtc_state->mode;
9667                         crtc->hwmode = new_crtc_state->mode;
9668                         mode_set_reset_required = true;
9669                 } else if (modereset_required(new_crtc_state)) {
9670                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9671                         /* i.e. reset mode */
9672                         if (dm_old_crtc_state->stream)
9673                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9674
9675                         mode_set_reset_required = true;
9676                 }
9677         } /* for_each_crtc_in_state() */
9678
9679         if (dc_state) {
9680                 /* If there was a mode set or reset, disable eDP PSR. */
9681                 if (mode_set_reset_required) {
9682                         if (dm->vblank_control_workqueue)
9683                                 flush_workqueue(dm->vblank_control_workqueue);
9684
9685                         amdgpu_dm_psr_disable_all(dm);
9686                 }
9687
9688                 dm_enable_per_frame_crtc_master_sync(dc_state);
9689                 mutex_lock(&dm->dc_lock);
9690                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9691
9692                 /* Allow idle optimizations when the active vblank IRQ count is 0 (all displays off) */
9693                 if (dm->active_vblank_irq_count == 0)
9694                         dc_allow_idle_optimizations(dm->dc, true);
9695                 mutex_unlock(&dm->dc_lock);
9696         }
9697
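             /*
              * Cache the OTG instance from the committed stream status on
              * each CRTC so the IRQ code addresses the right timing generator.
              */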
9698         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9699                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9700
9701                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9702
9703                 if (dm_new_crtc_state->stream != NULL) {
9704                         const struct dc_stream_status *status =
9705                                         dc_stream_get_status(dm_new_crtc_state->stream);
9706
9707                         if (!status)
9708                                 status = dc_stream_get_status_from_state(dc_state,
9709                                                                          dm_new_crtc_state->stream);
9710                         if (!status)
9711                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9712                         else
9713                                 acrtc->otg_inst = status->primary_otg_inst;
9714                 }
9715         }
9716 #ifdef CONFIG_DRM_AMD_DC_HDCP
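             /* Update content protection (HDCP) state for each connector. */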
9717         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9718                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9719                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9720                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9721
9722                 new_crtc_state = NULL;
9723
9724                 if (acrtc)
9725                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9726
9727                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9728
9729                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9730                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9731                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9732                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9733                         dm_new_con_state->update_hdcp = true;
9734                         continue;
9735                 }
9736
9737                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9738                         hdcp_update_display(
9739                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9740                                 new_con_state->hdcp_content_type,
9741                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9742         }
9743 #endif
9744
9745         /* Handle connector state changes */
9746         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9747                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9748                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9749                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9750                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9751                 struct dc_stream_update stream_update;
9752                 struct dc_info_packet hdr_packet;
9753                 struct dc_stream_status *status = NULL;
9754                 bool abm_changed, hdr_changed, scaling_changed;
9755
9756                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9757                 memset(&stream_update, 0, sizeof(stream_update));
9758
9759                 if (acrtc) {
9760                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9761                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9762                 }
9763
9764                 /* Skip any modesets/resets */
9765                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9766                         continue;
9767
9768                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9769                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9770
9771                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9772                                                              dm_old_con_state);
9773
9774                 abm_changed = dm_new_crtc_state->abm_level !=
9775                               dm_old_crtc_state->abm_level;
9776
9777                 hdr_changed =
9778                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9779
9780                 if (!scaling_changed && !abm_changed && !hdr_changed)
9781                         continue;
9782
9783                 stream_update.stream = dm_new_crtc_state->stream;
9784                 if (scaling_changed) {
9785                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9786                                         dm_new_con_state, dm_new_crtc_state->stream);
9787
9788                         stream_update.src = dm_new_crtc_state->stream->src;
9789                         stream_update.dst = dm_new_crtc_state->stream->dst;
9790                 }
9791
9792                 if (abm_changed) {
9793                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9794
9795                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9796                 }
9797
9798                 if (hdr_changed) {
9799                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9800                         stream_update.hdr_static_metadata = &hdr_packet;
9801                 }
9802
9803                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9804
9805                 if (WARN_ON(!status))
9806                         continue;
9807
9808                 WARN_ON(!status->plane_count);
9809
9810                 /*
9811                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9812                  * Here we create an empty update on each plane.
9813                  * To fix this, DC should permit updating only stream properties.
9814                  */
9815                 for (j = 0; j < status->plane_count; j++)
9816                         dummy_updates[j].surface = status->plane_states[0];
9817
9818
9819                 mutex_lock(&dm->dc_lock);
9820                 dc_commit_updates_for_stream(dm->dc,
9821                                                      dummy_updates,
9822                                                      status->plane_count,
9823                                                      dm_new_crtc_state->stream,
9824                                                      &stream_update,
9825                                                      dc_state);
9826                 mutex_unlock(&dm->dc_lock);
9827         }
9828
9829         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9830         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9831                                       new_crtc_state, i) {
9832                 if (old_crtc_state->active && !new_crtc_state->active)
9833                         crtc_disable_count++;
9834
9835                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9836                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9837
9838                 /* For freesync: update config on the crtc state and the irq parameters */
9839                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9840
9841                 /* Handle vrr on->off / off->on transitions */
9842                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9843                                                 dm_new_crtc_state);
9844         }
9845
9846         /*
9847          * Enable interrupts for CRTCs that are newly enabled or went through
9848          * a modeset. This is deliberately deferred until after the front-end
9849          * state has been modified, so that the OTG is already on and the IRQ
9850          * handlers don't access stale or invalid state.
9851          */
9852         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9853                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9854 #ifdef CONFIG_DEBUG_FS
9855                 bool configure_crc = false;
9856                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9857 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9858                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9859 #endif
9860                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9861                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9862                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9863 #endif
9864                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9865
9866                 if (new_crtc_state->active &&
9867                     (!old_crtc_state->active ||
9868                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9869                         dc_stream_retain(dm_new_crtc_state->stream);
9870                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9871                         manage_dm_interrupts(adev, acrtc, true);
9872
9873 #ifdef CONFIG_DEBUG_FS
9874                         /*
9875                          * The frontend may have changed, so reapply the CRC capture
9876                          * settings for the stream.
9877                          */
9878                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9879
9880                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9881                                 configure_crc = true;
9882 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9883                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9884                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9885                                         acrtc->dm_irq_params.crc_window.update_win = true;
9886                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9887                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9888                                         crc_rd_wrk->crtc = crtc;
9889                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9890                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9891                                 }
9892 #endif
9893                         }
9894
9895                         if (configure_crc)
9896                                 if (amdgpu_dm_crtc_configure_crc_source(
9897                                         crtc, dm_new_crtc_state, cur_crc_src))
9898                                         DRM_DEBUG_DRIVER("Failed to configure crc source");
9899 #endif
9900                 }
9901         }
9902
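             /* Skip the flip-done wait below if any CRTC requested an async flip. */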
9903         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9904                 if (new_crtc_state->async_flip)
9905                         wait_for_vblank = false;
9906
9907         /* Update planes when needed, per CRTC. */
9908         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9909                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9910
9911                 if (dm_new_crtc_state->stream)
9912                         amdgpu_dm_commit_planes(state, dc_state, dev,
9913                                                 dm, crtc, wait_for_vblank);
9914         }
9915
9916         /* Update audio instances for each connector. */
9917         amdgpu_dm_commit_audio(dev, state);
9918
9919 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9920         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9921         /* restore the backlight level */
9922         for (i = 0; i < dm->num_of_edps; i++) {
9923                 if (dm->backlight_dev[i] &&
9924                     (dm->actual_brightness[i] != dm->brightness[i]))
9925                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9926         }
9927 #endif
9928         /*
9929          * Send a vblank event for all events not handled in the flip path,
9930          * and mark each event consumed for drm_atomic_helper_commit_hw_done().
9931          */
9932         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9933         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9934
9935                 if (new_crtc_state->event)
9936                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9937
9938                 new_crtc_state->event = NULL;
9939         }
9940         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9941
9942         /* Signal HW programming completion */
9943         drm_atomic_helper_commit_hw_done(state);
9944
9945         if (wait_for_vblank)
9946                 drm_atomic_helper_wait_for_flip_done(dev, state);
9947
9948         drm_atomic_helper_cleanup_planes(dev, state);
9949
9950         /* return the stolen vga memory back to VRAM */
9951         if (!adev->mman.keep_stolen_vga_memory)
9952                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9953         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9954
9955         /*
9956          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9957          * so we can put the GPU into runtime suspend if we're not driving any
9958          * displays anymore
9959          */
9960         for (i = 0; i < crtc_disable_count; i++)
9961                 pm_runtime_put_autosuspend(dev->dev);
9962         pm_runtime_mark_last_busy(dev->dev);
9963
9964         if (dc_state_temp)
9965                 dc_release_state(dc_state_temp);
9966 }
9967
9968
9969 static int dm_force_atomic_commit(struct drm_connector *connector)
9970 {
9971         int ret = 0;
9972         struct drm_device *ddev = connector->dev;
9973         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9974         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9975         struct drm_plane *plane = disconnected_acrtc->base.primary;
9976         struct drm_connector_state *conn_state;
9977         struct drm_crtc_state *crtc_state;
9978         struct drm_plane_state *plane_state;
9979
9980         if (!state)
9981                 return -ENOMEM;
9982
9983         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9984
9985         /* Construct an atomic state to restore the previous display settings */
9986
9987         /*
9988          * Attach connectors to drm_atomic_state
9989          */
9990         conn_state = drm_atomic_get_connector_state(state, connector);
9991
9992         ret = PTR_ERR_OR_ZERO(conn_state);
9993         if (ret)
9994                 goto out;
9995
9996         /* Attach CRTC to drm_atomic_state */
9997         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9998
9999         ret = PTR_ERR_OR_ZERO(crtc_state);
10000         if (ret)
10001                 goto out;
10002
10003         /* force a restore */
10004         crtc_state->mode_changed = true;
10005
10006         /* Attach plane to drm_atomic_state */
10007         plane_state = drm_atomic_get_plane_state(state, plane);
10008
10009         ret = PTR_ERR_OR_ZERO(plane_state);
10010         if (ret)
10011                 goto out;
10012
10013         /* Call commit internally with the state we just constructed */
10014         ret = drm_atomic_commit(state);
10015
10016 out:
10017         drm_atomic_state_put(state);
10018         if (ret)
10019                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10020
10021         return ret;
10022 }
10023
10024 /*
10025  * This function handles all cases when a set mode does not come after a
10026  * hotplug. This includes when a display is unplugged and then plugged back
10027  * into the same port, and when running without usermode desktop manager support.
10028  */
10029 void dm_restore_drm_connector_state(struct drm_device *dev,
10030                                     struct drm_connector *connector)
10031 {
10032         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10033         struct amdgpu_crtc *disconnected_acrtc;
10034         struct dm_crtc_state *acrtc_state;
10035
10036         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10037                 return;
10038
10039         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10040         if (!disconnected_acrtc)
10041                 return;
10042
10043         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10044         if (!acrtc_state->stream)
10045                 return;
10046
10047         /*
10048          * If the previous sink is not released and different from the current,
10049          * we deduce we are in a state where we cannot rely on a usermode
10050          * call to turn on the display, so we do it here
10051          */
10052         if (acrtc_state->stream->sink != aconnector->dc_sink)
10053                 dm_force_atomic_commit(&aconnector->base);
10054 }
10055
10056 /*
10057  * Grabs all modesetting locks to serialize against any blocking commits,
10058  * and waits for completion of all non-blocking commits.
10059  */
10060 static int do_aquire_global_lock(struct drm_device *dev,
10061                                  struct drm_atomic_state *state)
10062 {
10063         struct drm_crtc *crtc;
10064         struct drm_crtc_commit *commit;
10065         long ret;
10066
10067         /*
10068          * Adding all modeset locks to acquire_ctx will
10069          * ensure that when the framework releases it, the
10070          * extra locks we are taking here will get released too
10071          */
10072         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10073         if (ret)
10074                 return ret;
10075
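              /*
               * For each CRTC, wait for its newest pending commit (if any) to
               * finish hardware programming and its page flip to complete.
               */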
10076         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10077                 spin_lock(&crtc->commit_lock);
10078                 commit = list_first_entry_or_null(&crtc->commit_list,
10079                                 struct drm_crtc_commit, commit_entry);
10080                 if (commit)
10081                         drm_crtc_commit_get(commit);
10082                 spin_unlock(&crtc->commit_lock);
10083
10084                 if (!commit)
10085                         continue;
10086
10087                 /*
10088                  * Make sure all pending HW programming has completed and
10089                  * page flips are done
10090                  */
10091                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10092
10093                 if (ret > 0)
10094                         ret = wait_for_completion_interruptible_timeout(
10095                                         &commit->flip_done, 10*HZ);
10096
10097                 if (ret == 0)
10098                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10099                                   "timed out\n", crtc->base.id, crtc->name);
10100
10101                 drm_crtc_commit_put(commit);
10102         }
10103
10104         return ret < 0 ? ret : 0;
10105 }
10106
10107 static void get_freesync_config_for_crtc(
10108         struct dm_crtc_state *new_crtc_state,
10109         struct dm_connector_state *new_con_state)
10110 {
10111         struct mod_freesync_config config = {0};
10112         struct amdgpu_dm_connector *aconnector =
10113                         to_amdgpu_dm_connector(new_con_state->base.connector);
10114         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10115         int vrefresh = drm_mode_vrefresh(mode);
10116         bool fs_vid_mode = false;
10117
10118         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10119                                         vrefresh >= aconnector->min_vfreq &&
10120                                         vrefresh <= aconnector->max_vfreq;
10121
10122         if (new_crtc_state->vrr_supported) {
10123                 new_crtc_state->stream->ignore_msa_timing_param = true;
10124                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10125
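                      /* mod_freesync expects the refresh-rate limits in micro-Hz. */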
10126                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10127                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10128                 config.vsif_supported = true;
10129                 config.btr = true;
10130
10131                 if (fs_vid_mode) {
10132                         config.state = VRR_STATE_ACTIVE_FIXED;
10133                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10134                         goto out;
10135                 } else if (new_crtc_state->base.vrr_enabled) {
10136                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10137                 } else {
10138                         config.state = VRR_STATE_INACTIVE;
10139                 }
10140         }
10141 out:
10142         new_crtc_state->freesync_config = config;
10143 }
10144
10145 static void reset_freesync_config_for_crtc(
10146         struct dm_crtc_state *new_crtc_state)
10147 {
10148         new_crtc_state->vrr_supported = false;
10149
10150         memset(&new_crtc_state->vrr_infopacket, 0,
10151                sizeof(new_crtc_state->vrr_infopacket));
10152 }
10153
10154 static bool
10155 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10156                                  struct drm_crtc_state *new_crtc_state)
10157 {
10158         const struct drm_display_mode *old_mode, *new_mode;
10159
10160         if (!old_crtc_state || !new_crtc_state)
10161                 return false;
10162
10163         old_mode = &old_crtc_state->mode;
10164         new_mode = &new_crtc_state->mode;
10165
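              /*
               * The mix of == and != below is intentional: the timing counts
               * as unchanged for freesync when only vtotal and the vsync
               * position moved while the vsync pulse width stayed the same,
               * i.e. only the vertical front porch changed.
               */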
10166         if (old_mode->clock       == new_mode->clock &&
10167             old_mode->hdisplay    == new_mode->hdisplay &&
10168             old_mode->vdisplay    == new_mode->vdisplay &&
10169             old_mode->htotal      == new_mode->htotal &&
10170             old_mode->vtotal      != new_mode->vtotal &&
10171             old_mode->hsync_start == new_mode->hsync_start &&
10172             old_mode->vsync_start != new_mode->vsync_start &&
10173             old_mode->hsync_end   == new_mode->hsync_end &&
10174             old_mode->vsync_end   != new_mode->vsync_end &&
10175             old_mode->hskew       == new_mode->hskew &&
10176             old_mode->vscan       == new_mode->vscan &&
10177             (old_mode->vsync_end - old_mode->vsync_start) ==
10178             (new_mode->vsync_end - new_mode->vsync_start))
10179                 return true;
10180
10181         return false;
10182 }
10183
10184 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10185         uint64_t num, den, res;
10186         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10187
10188         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10189
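              /*
               * Fixed refresh rate in uHz: mode.clock is in kHz, so
               * clock * 1000 gives Hz and the extra 1000000 factor gives uHz;
               * dividing by htotal * vtotal (pixels per frame) yields frames
               * per second in uHz.
               */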
10190         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10191         den = (unsigned long long)new_crtc_state->mode.htotal *
10192               (unsigned long long)new_crtc_state->mode.vtotal;
10193
10194         res = div_u64(num, den);
10195         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10196 }
10197
10198 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10199                          struct drm_atomic_state *state,
10200                          struct drm_crtc *crtc,
10201                          struct drm_crtc_state *old_crtc_state,
10202                          struct drm_crtc_state *new_crtc_state,
10203                          bool enable,
10204                          bool *lock_and_validation_needed)
10205 {
10206         struct dm_atomic_state *dm_state = NULL;
10207         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10208         struct dc_stream_state *new_stream;
10209         int ret = 0;
10210
10211         /*
10212          * TODO: Move this code into dm_crtc_atomic_check once we get rid
10213          * of dc_validation_set, and update changed items there.
10214          */
10215         struct amdgpu_crtc *acrtc = NULL;
10216         struct amdgpu_dm_connector *aconnector = NULL;
10217         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10218         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10219
10220         new_stream = NULL;
10221
10222         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10223         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10224         acrtc = to_amdgpu_crtc(crtc);
10225         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10226
10227         /* TODO This hack should go away */
10228         if (aconnector && enable) {
10229                 /* Make sure a fake sink is created in the plug-in scenario */
10230                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10231                                                             &aconnector->base);
10232                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10233                                                             &aconnector->base);
10234
10235                 if (IS_ERR(drm_new_conn_state)) {
10236                         ret = PTR_ERR(drm_new_conn_state);
10237                         goto fail;
10238                 }
10239
10240                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10241                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10242
10243                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10244                         goto skip_modeset;
10245
10246                 new_stream = create_validate_stream_for_sink(aconnector,
10247                                                              &new_crtc_state->mode,
10248                                                              dm_new_conn_state,
10249                                                              dm_old_crtc_state->stream);
10250
10251                 /*
10252                  * We can have no stream on ACTION_SET if a display
10253                  * was disconnected during S3; in this case it is not an
10254                  * error, and the OS will be updated after detection and
10255                  * will do the right thing on the next atomic commit.
10256                  */
10257
10258                 if (!new_stream) {
10259                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10260                                         __func__, acrtc->base.base.id);
10261                         ret = -ENOMEM;
10262                         goto fail;
10263                 }
10264
10265                 /*
10266                  * TODO: Check VSDB bits to decide whether this should
10267                  * be enabled or not.
10268                  */
10269                 new_stream->triggered_crtc_reset.enabled =
10270                         dm->force_timing_sync;
10271
10272                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10273
10274                 ret = fill_hdr_info_packet(drm_new_conn_state,
10275                                            &new_stream->hdr_static_metadata);
10276                 if (ret)
10277                         goto fail;
10278
10279                 /*
10280                  * If we already removed the old stream from the context
10281                  * (and set the new stream to NULL) then we can't reuse
10282                  * the old stream even if the stream and scaling are unchanged.
10283                  * We'll hit the BUG_ON and black screen.
10284                  *
10285                  * TODO: Refactor this function to allow this check to work
10286                  * in all conditions.
10287                  */
10288                 if (dm_new_crtc_state->stream &&
10289                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10290                         goto skip_modeset;
10291
10292                 if (dm_new_crtc_state->stream &&
10293                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10294                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10295                         new_crtc_state->mode_changed = false;
10296                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10297                                          new_crtc_state->mode_changed);
10298                 }
10299         }
10300
10301         /* mode_changed flag may get updated above, need to check again */
10302         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10303                 goto skip_modeset;
10304
10305         drm_dbg_state(state->dev,
10306                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10307                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10308                 "connectors_changed:%d\n",
10309                 acrtc->crtc_id,
10310                 new_crtc_state->enable,
10311                 new_crtc_state->active,
10312                 new_crtc_state->planes_changed,
10313                 new_crtc_state->mode_changed,
10314                 new_crtc_state->active_changed,
10315                 new_crtc_state->connectors_changed);
10316
10317         /* Remove stream for any changed/disabled CRTC */
10318         if (!enable) {
10319
10320                 if (!dm_old_crtc_state->stream)
10321                         goto skip_modeset;
10322
10323                 if (dm_new_crtc_state->stream &&
10324                     is_timing_unchanged_for_freesync(new_crtc_state,
10325                                                      old_crtc_state)) {
10326                         new_crtc_state->mode_changed = false;
10327                         DRM_DEBUG_DRIVER(
10328                                 "Mode change not required for front porch change, "
10329                                 "setting mode_changed to %d",
10330                                 new_crtc_state->mode_changed);
10331
10332                         set_freesync_fixed_config(dm_new_crtc_state);
10333
10334                         goto skip_modeset;
10335                 } else if (aconnector &&
10336                            is_freesync_video_mode(&new_crtc_state->mode,
10337                                                   aconnector)) {
10338                         struct drm_display_mode *high_mode;
10339
10340                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10341                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10342                                 set_freesync_fixed_config(dm_new_crtc_state);
10343                         }
10344                 }
10345
10346                 ret = dm_atomic_get_state(state, &dm_state);
10347                 if (ret)
10348                         goto fail;
10349
10350                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10351                                 crtc->base.id);
10352
10353                 /* i.e. reset mode */
10354                 if (dc_remove_stream_from_ctx(
10355                                 dm->dc,
10356                                 dm_state->context,
10357                                 dm_old_crtc_state->stream) != DC_OK) {
10358                         ret = -EINVAL;
10359                         goto fail;
10360                 }
10361
10362                 dc_stream_release(dm_old_crtc_state->stream);
10363                 dm_new_crtc_state->stream = NULL;
10364
10365                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10366
10367                 *lock_and_validation_needed = true;
10368
10369         } else { /* Add stream for any updated/enabled CRTC */
10370                 /*
10371                  * Quick fix to prevent NULL pointer on new_stream when
10372                  * added MST connectors are not found in the existing crtc_state in chained mode.
10373                  * TODO: need to dig out the root cause of that
10374                  */
10375                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10376                         goto skip_modeset;
10377
10378                 if (modereset_required(new_crtc_state))
10379                         goto skip_modeset;
10380
10381                 if (modeset_required(new_crtc_state, new_stream,
10382                                      dm_old_crtc_state->stream)) {
10383
10384                         WARN_ON(dm_new_crtc_state->stream);
10385
10386                         ret = dm_atomic_get_state(state, &dm_state);
10387                         if (ret)
10388                                 goto fail;
10389
10390                         dm_new_crtc_state->stream = new_stream;
10391
10392                         dc_stream_retain(new_stream);
10393
10394                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10395                                          crtc->base.id);
10396
10397                         if (dc_add_stream_to_ctx(
10398                                         dm->dc,
10399                                         dm_state->context,
10400                                         dm_new_crtc_state->stream) != DC_OK) {
10401                                 ret = -EINVAL;
10402                                 goto fail;
10403                         }
10404
10405                         *lock_and_validation_needed = true;
10406                 }
10407         }
10408
10409 skip_modeset:
10410         /* Release extra reference */
10411         if (new_stream)
                dc_stream_release(new_stream);
10413
10414         /*
10415          * We want to do dc stream updates that do not require a
10416          * full modeset below.
10417          */
10418         if (!(enable && aconnector && new_crtc_state->active))
10419                 return 0;
        /*
         * Given the above conditions, the dc stream state cannot be NULL
         * because:
         * 1. We're in the process of enabling CRTCs (the stream has just
         *    been added to the dc context, or is already in the context),
         * 2. It has a valid connector attached, and
         * 3. It is currently active and enabled.
         * => The dc stream state currently exists.
         */
10428         BUG_ON(dm_new_crtc_state->stream == NULL);
10429
10430         /* Scaling or underscan settings */
        if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
            drm_atomic_crtc_needs_modeset(new_crtc_state))
10433                 update_stream_scaling_settings(
10434                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10435
10436         /* ABM settings */
10437         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10438
10439         /*
10440          * Color management settings. We also update color properties
10441          * when a modeset is needed, to ensure it gets reprogrammed.
10442          */
10443         if (dm_new_crtc_state->base.color_mgmt_changed ||
10444             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10445                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10446                 if (ret)
10447                         goto fail;
10448         }
10449
10450         /* Update Freesync settings. */
10451         get_freesync_config_for_crtc(dm_new_crtc_state,
10452                                      dm_new_conn_state);
10453
10454         return ret;
10455
10456 fail:
10457         if (new_stream)
10458                 dc_stream_release(new_stream);
10459         return ret;
10460 }
10461
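/*
 * Decide whether a plane's DC state must be torn down and rebuilt for
 * this atomic update. This is deliberately conservative: any change
 * that can affect bandwidth or pipe acquisition (modesets, scaling,
 * rotation, blending, pixel format, tiling/DCC) resets every plane on
 * the stream.
 */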
10462 static bool should_reset_plane(struct drm_atomic_state *state,
10463                                struct drm_plane *plane,
10464                                struct drm_plane_state *old_plane_state,
10465                                struct drm_plane_state *new_plane_state)
10466 {
10467         struct drm_plane *other;
10468         struct drm_plane_state *old_other_state, *new_other_state;
10469         struct drm_crtc_state *new_crtc_state;
10470         int i;
10471
10472         /*
         * TODO: Remove this hack once the checks below are sufficient
         * to determine when we need to reset all the planes on
10475          * the stream.
10476          */
10477         if (state->allow_modeset)
10478                 return true;
10479
10480         /* Exit early if we know that we're adding or removing the plane. */
10481         if (old_plane_state->crtc != new_plane_state->crtc)
10482                 return true;
10483
10484         /* old crtc == new_crtc == NULL, plane not in context. */
10485         if (!new_plane_state->crtc)
10486                 return false;
10487
10488         new_crtc_state =
10489                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10490
10491         if (!new_crtc_state)
10492                 return true;
10493
10494         /* CRTC Degamma changes currently require us to recreate planes. */
10495         if (new_crtc_state->color_mgmt_changed)
10496                 return true;
10497
10498         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10499                 return true;
10500
10501         /*
10502          * If there are any new primary or overlay planes being added or
10503          * removed then the z-order can potentially change. To ensure
10504          * correct z-order and pipe acquisition the current DC architecture
10505          * requires us to remove and recreate all existing planes.
10506          *
10507          * TODO: Come up with a more elegant solution for this.
10508          */
10509         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
                struct amdgpu_framebuffer *old_afb, *new_afb;

                if (other->type == DRM_PLANE_TYPE_CURSOR)
10512                         continue;
10513
10514                 if (old_other_state->crtc != new_plane_state->crtc &&
10515                     new_other_state->crtc != new_plane_state->crtc)
10516                         continue;
10517
10518                 if (old_other_state->crtc != new_other_state->crtc)
10519                         return true;
10520
10521                 /* Src/dst size and scaling updates. */
10522                 if (old_other_state->src_w != new_other_state->src_w ||
10523                     old_other_state->src_h != new_other_state->src_h ||
10524                     old_other_state->crtc_w != new_other_state->crtc_w ||
10525                     old_other_state->crtc_h != new_other_state->crtc_h)
10526                         return true;
10527
10528                 /* Rotation / mirroring updates. */
10529                 if (old_other_state->rotation != new_other_state->rotation)
10530                         return true;
10531
10532                 /* Blending updates. */
10533                 if (old_other_state->pixel_blend_mode !=
10534                     new_other_state->pixel_blend_mode)
10535                         return true;
10536
10537                 /* Alpha updates. */
10538                 if (old_other_state->alpha != new_other_state->alpha)
10539                         return true;
10540
10541                 /* Colorspace changes. */
10542                 if (old_other_state->color_range != new_other_state->color_range ||
10543                     old_other_state->color_encoding != new_other_state->color_encoding)
10544                         return true;
10545
                /* Framebuffer checks come last. */
10547                 if (!old_other_state->fb || !new_other_state->fb)
10548                         continue;
10549
10550                 /* Pixel format changes can require bandwidth updates. */
10551                 if (old_other_state->fb->format != new_other_state->fb->format)
10552                         return true;
10553
10554                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10555                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10556
10557                 /* Tiling and DCC changes also require bandwidth updates. */
10558                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10559                     old_afb->base.modifier != new_afb->base.modifier)
10560                         return true;
10561         }
10562
10563         return false;
10564 }
10565
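/*
 * Validate a framebuffer for use on the cursor plane: the cursor must
 * fit the CRTC's maximum cursor dimensions, be uncropped, use a pitch
 * of exactly 64, 128 or 256 pixels, and be linearly tiled.
 */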
10566 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10567                               struct drm_plane_state *new_plane_state,
10568                               struct drm_framebuffer *fb)
10569 {
10570         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10571         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10572         unsigned int pitch;
10573         bool linear;
10574
10575         if (fb->width > new_acrtc->max_cursor_width ||
10576             fb->height > new_acrtc->max_cursor_height) {
                DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
                                 fb->width, fb->height);
10580                 return -EINVAL;
10581         }
10582         if (new_plane_state->src_w != fb->width << 16 ||
10583             new_plane_state->src_h != fb->height << 16) {
10584                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10585                 return -EINVAL;
10586         }
10587
10588         /* Pitch in pixels */
10589         pitch = fb->pitches[0] / fb->format->cpp[0];
10590
10591         if (fb->width != pitch) {
                DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10593                                  fb->width, pitch);
10594                 return -EINVAL;
10595         }
10596
10597         switch (pitch) {
10598         case 64:
10599         case 128:
10600         case 256:
10601                 /* FB pitch is supported by cursor plane */
10602                 break;
10603         default:
10604                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10605                 return -EINVAL;
10606         }
10607
        /*
         * Core DRM takes care of checking FB modifiers, so we only need to
         * check tiling flags when the FB doesn't have a modifier.
         */
10610         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10611                 if (adev->family < AMDGPU_FAMILY_AI) {
10612                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10613                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10614                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10615                 } else {
10616                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10617                 }
10618                 if (!linear) {
                        DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10620                         return -EINVAL;
10621                 }
10622         }
10623
10624         return 0;
10625 }
10626
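/*
 * Add a plane to or remove it from the DC context for this atomic
 * update. With enable == false, changed/removed planes are taken out
 * of the old stream's context; with enable == true, new/updated planes
 * are validated and added to the new stream's context. Cursor planes
 * are only validated here and never added to the DC context, since DC
 * handles the cursor separately.
 */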
10627 static int dm_update_plane_state(struct dc *dc,
10628                                  struct drm_atomic_state *state,
10629                                  struct drm_plane *plane,
10630                                  struct drm_plane_state *old_plane_state,
10631                                  struct drm_plane_state *new_plane_state,
10632                                  bool enable,
10633                                  bool *lock_and_validation_needed)
{
10636         struct dm_atomic_state *dm_state = NULL;
10637         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10638         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10639         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10640         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10641         struct amdgpu_crtc *new_acrtc;
10642         bool needs_reset;
        int ret = 0;

10646         new_plane_crtc = new_plane_state->crtc;
10647         old_plane_crtc = old_plane_state->crtc;
10648         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10649         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10650
10651         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10652                 if (!enable || !new_plane_crtc ||
                    drm_atomic_plane_disabling(plane->state, new_plane_state))
10654                         return 0;
10655
10656                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10657
10658                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10659                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10660                         return -EINVAL;
10661                 }
10662
10663                 if (new_plane_state->fb) {
10664                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10665                                                  new_plane_state->fb);
10666                         if (ret)
10667                                 return ret;
10668                 }
10669
10670                 return 0;
10671         }
10672
10673         needs_reset = should_reset_plane(state, plane, old_plane_state,
10674                                          new_plane_state);
10675
10676         /* Remove any changed/removed planes */
10677         if (!enable) {
10678                 if (!needs_reset)
10679                         return 0;
10680
10681                 if (!old_plane_crtc)
10682                         return 0;
10683
10684                 old_crtc_state = drm_atomic_get_old_crtc_state(
10685                                 state, old_plane_crtc);
10686                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10687
10688                 if (!dm_old_crtc_state->stream)
10689                         return 0;
10690
10691                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10692                                 plane->base.id, old_plane_crtc->base.id);
10693
10694                 ret = dm_atomic_get_state(state, &dm_state);
10695                 if (ret)
10696                         return ret;
10697
10698                 if (!dc_remove_plane_from_context(
10699                                 dc,
10700                                 dm_old_crtc_state->stream,
10701                                 dm_old_plane_state->dc_state,
                                dm_state->context)) {
                        return -EINVAL;
                }

10708                 dc_plane_state_release(dm_old_plane_state->dc_state);
10709                 dm_new_plane_state->dc_state = NULL;
10710
10711                 *lock_and_validation_needed = true;
10712
10713         } else { /* Add new planes */
10714                 struct dc_plane_state *dc_new_plane_state;
10715
10716                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10717                         return 0;
10718
10719                 if (!new_plane_crtc)
10720                         return 0;
10721
10722                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10723                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10724
10725                 if (!dm_new_crtc_state->stream)
10726                         return 0;
10727
10728                 if (!needs_reset)
10729                         return 0;
10730
10731                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10732                 if (ret)
10733                         return ret;
10734
10735                 WARN_ON(dm_new_plane_state->dc_state);
10736
10737                 dc_new_plane_state = dc_create_plane_state(dc);
10738                 if (!dc_new_plane_state)
10739                         return -ENOMEM;
10740
10741                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10742                                  plane->base.id, new_plane_crtc->base.id);
10743
10744                 ret = fill_dc_plane_attributes(
10745                         drm_to_adev(new_plane_crtc->dev),
10746                         dc_new_plane_state,
10747                         new_plane_state,
10748                         new_crtc_state);
10749                 if (ret) {
10750                         dc_plane_state_release(dc_new_plane_state);
10751                         return ret;
10752                 }
10753
10754                 ret = dm_atomic_get_state(state, &dm_state);
10755                 if (ret) {
10756                         dc_plane_state_release(dc_new_plane_state);
10757                         return ret;
10758                 }
10759
10760                 /*
10761                  * Any atomic check errors that occur after this will
10762                  * not need a release. The plane state will be attached
10763                  * to the stream, and therefore part of the atomic
10764                  * state. It'll be released when the atomic state is
10765                  * cleaned.
10766                  */
10767                 if (!dc_add_plane_to_context(
10768                                 dc,
10769                                 dm_new_crtc_state->stream,
10770                                 dc_new_plane_state,
                                dm_state->context)) {
10773                         dc_plane_state_release(dc_new_plane_state);
10774                         return -EINVAL;
10775                 }
10776
10777                 dm_new_plane_state->dc_state = dc_new_plane_state;
10778
10779                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10780
                /*
                 * Tell DC to do a full surface update every time there
                 * is a plane change. Inefficient, but works for now.
                 */
10784                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10785
10786                 *lock_and_validation_needed = true;
        }

10790         return ret;
10791 }
10792
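/*
 * Return the plane's source size in whole pixels, swapping width and
 * height for 90/270 degree rotations so that callers compare
 * post-rotation dimensions; e.g. a 1920x1080 source rotated 90 degrees
 * reports 1080x1920. The >> 16 drops the 16.16 fixed-point fraction
 * used by drm_plane_state src coordinates.
 */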
10793 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10794                                        int *src_w, int *src_h)
10795 {
10796         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10797         case DRM_MODE_ROTATE_90:
10798         case DRM_MODE_ROTATE_270:
10799                 *src_w = plane_state->src_h >> 16;
10800                 *src_h = plane_state->src_w >> 16;
10801                 break;
10802         case DRM_MODE_ROTATE_0:
10803         case DRM_MODE_ROTATE_180:
10804         default:
10805                 *src_w = plane_state->src_w >> 16;
10806                 *src_h = plane_state->src_h >> 16;
10807                 break;
10808         }
10809 }
10810
10811 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10812                                 struct drm_crtc *crtc,
10813                                 struct drm_crtc_state *new_crtc_state)
10814 {
10815         struct drm_plane *cursor = crtc->cursor, *underlying;
10816         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10817         int i;
10818         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10819         int cursor_src_w, cursor_src_h;
10820         int underlying_src_w, underlying_src_h;
10821
        /*
         * On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe, but it's going to inherit the scaling and
         * positioning from the underlying pipe. Check that the cursor plane's
         * blending properties match the underlying planes'.
         */
10826
10827         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
        if (!new_cursor_state || !new_cursor_state->fb)
                return 0;
10831
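        /*
         * Compute the cursor scale as an integer ratio in thousandths,
         * e.g. a 64x64 cursor displayed at 128x128 yields a scale of 2000.
         */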
10832         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10833         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10834         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10835
10836         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10837                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10838                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10839                         continue;
10840
10841                 /* Ignore disabled planes */
10842                 if (!new_underlying_state->fb)
10843                         continue;
10844
10845                 dm_get_oriented_plane_size(new_underlying_state,
10846                                            &underlying_src_w, &underlying_src_h);
10847                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10848                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10849
10850                 if (cursor_scale_w != underlying_scale_w ||
10851                     cursor_scale_h != underlying_scale_h) {
10852                         drm_dbg_atomic(crtc->dev,
10853                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10854                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10855                         return -EINVAL;
10856                 }
10857
10858                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10859                 if (new_underlying_state->crtc_x <= 0 &&
10860                     new_underlying_state->crtc_y <= 0 &&
10861                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10862                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10863                         break;
10864         }
10865
10866         return 0;
10867 }
10868
10869 #if defined(CONFIG_DRM_AMD_DC_DCN)
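/*
 * When a modeset touches a CRTC on a DSC-capable MST topology, pull
 * every other CRTC sharing that topology into the atomic state so DSC
 * bandwidth can be redistributed across all affected streams.
 */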
10870 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10871 {
10872         struct drm_connector *connector;
10873         struct drm_connector_state *conn_state, *old_conn_state;
10874         struct amdgpu_dm_connector *aconnector = NULL;
        int i;

        for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10877                 if (!conn_state->crtc)
10878                         conn_state = old_conn_state;
10879
10880                 if (conn_state->crtc != crtc)
10881                         continue;
10882
10883                 aconnector = to_amdgpu_dm_connector(connector);
10884                 if (!aconnector->port || !aconnector->mst_port)
10885                         aconnector = NULL;
10886                 else
10887                         break;
10888         }
10889
10890         if (!aconnector)
10891                 return 0;
10892
10893         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10894 }
10895 #endif
10896
10897 /**
10898  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10899  * @dev: The DRM device
10900  * @state: The atomic state to commit
10901  *
10902  * Validate that the given atomic state is programmable by DC into hardware.
10903  * This involves constructing a &struct dc_state reflecting the new hardware
10904  * state we wish to commit, then querying DC to see if it is programmable. It's
10905  * important not to modify the existing DC state. Otherwise, atomic_check
10906  * may unexpectedly commit hardware changes.
10907  *
10908  * When validating the DC state, it's important that the right locks are
 * acquired. For a full update that removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock
 * guarantees that any such full update commit will wait for completion
 * of any outstanding flip using DRM's synchronization events.
10913  *
10914  * Note that DM adds the affected connectors for all CRTCs in state, when that
10915  * might not seem necessary. This is because DC stream creation requires the
10916  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10917  * be possible but non-trivial - a possible TODO item.
10918  *
 * Return: 0 on success, or a negative error code on failure.
10920  */
10921 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10922                                   struct drm_atomic_state *state)
10923 {
10924         struct amdgpu_device *adev = drm_to_adev(dev);
10925         struct dm_atomic_state *dm_state = NULL;
10926         struct dc *dc = adev->dm.dc;
10927         struct drm_connector *connector;
10928         struct drm_connector_state *old_con_state, *new_con_state;
10929         struct drm_crtc *crtc;
10930         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10931         struct drm_plane *plane;
10932         struct drm_plane_state *old_plane_state, *new_plane_state;
10933         enum dc_status status;
10934         int ret, i;
10935         bool lock_and_validation_needed = false;
10936         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10937 #if defined(CONFIG_DRM_AMD_DC_DCN)
10938         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10939         struct drm_dp_mst_topology_state *mst_state;
10940         struct drm_dp_mst_topology_mgr *mgr;
10941 #endif
10942
10943         trace_amdgpu_dm_atomic_check_begin(state);
10944
10945         ret = drm_atomic_helper_check_modeset(dev, state);
10946         if (ret) {
10947                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10948                 goto fail;
10949         }
10950
10951         /* Check connector changes */
10952         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10953                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10954                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10955
10956                 /* Skip connectors that are disabled or part of modeset already. */
10957                 if (!old_con_state->crtc && !new_con_state->crtc)
10958                         continue;
10959
10960                 if (!new_con_state->crtc)
10961                         continue;
10962
10963                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10964                 if (IS_ERR(new_crtc_state)) {
10965                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10966                         ret = PTR_ERR(new_crtc_state);
10967                         goto fail;
10968                 }
10969
10970                 if (dm_old_con_state->abm_level !=
10971                     dm_new_con_state->abm_level)
10972                         new_crtc_state->connectors_changed = true;
10973         }
10974
10975 #if defined(CONFIG_DRM_AMD_DC_DCN)
10976         if (dc_resource_is_dsc_encoding_supported(dc)) {
10977                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10978                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10979                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10980                                 if (ret) {
10981                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10982                                         goto fail;
10983                                 }
10984                         }
10985                 }
10986                 pre_validate_dsc(state, &dm_state, vars);
10987         }
10988 #endif
10989         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10990                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10991
10992                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10993                     !new_crtc_state->color_mgmt_changed &&
10994                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
                    !dm_old_crtc_state->dsc_force_changed)
10996                         continue;
10997
10998                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10999                 if (ret) {
11000                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
11001                         goto fail;
11002                 }
11003
11004                 if (!new_crtc_state->enable)
11005                         continue;
11006
11007                 ret = drm_atomic_add_affected_connectors(state, crtc);
11008                 if (ret) {
11009                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11010                         goto fail;
11011                 }
11012
11013                 ret = drm_atomic_add_affected_planes(state, crtc);
11014                 if (ret) {
11015                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11016                         goto fail;
11017                 }
11018
11019                 if (dm_old_crtc_state->dsc_force_changed)
11020                         new_crtc_state->mode_changed = true;
11021         }
11022
11023         /*
11024          * Add all primary and overlay planes on the CRTC to the state
11025          * whenever a plane is enabled to maintain correct z-ordering
11026          * and to enable fast surface updates.
11027          */
11028         drm_for_each_crtc(crtc, dev) {
11029                 bool modified = false;
11030
11031                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11032                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11033                                 continue;
11034
11035                         if (new_plane_state->crtc == crtc ||
11036                             old_plane_state->crtc == crtc) {
11037                                 modified = true;
11038                                 break;
11039                         }
11040                 }
11041
11042                 if (!modified)
11043                         continue;
11044
11045                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11046                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11047                                 continue;
11048
11049                         new_plane_state =
11050                                 drm_atomic_get_plane_state(state, plane);
11051
11052                         if (IS_ERR(new_plane_state)) {
11053                                 ret = PTR_ERR(new_plane_state);
                                DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11055                                 goto fail;
11056                         }
11057                 }
11058         }
11059
        /* Remove existing planes if they are modified */
11061         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11062                 ret = dm_update_plane_state(dc, state, plane,
11063                                             old_plane_state,
11064                                             new_plane_state,
11065                                             false,
11066                                             &lock_and_validation_needed);
11067                 if (ret) {
11068                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11069                         goto fail;
11070                 }
11071         }
11072
11073         /* Disable all crtcs which require disable */
11074         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11075                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11076                                            old_crtc_state,
11077                                            new_crtc_state,
11078                                            false,
11079                                            &lock_and_validation_needed);
11080                 if (ret) {
11081                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11082                         goto fail;
11083                 }
11084         }
11085
11086         /* Enable all crtcs which require enable */
11087         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11088                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11089                                            old_crtc_state,
11090                                            new_crtc_state,
11091                                            true,
11092                                            &lock_and_validation_needed);
11093                 if (ret) {
11094                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11095                         goto fail;
11096                 }
11097         }
11098
11099         /* Add new/modified planes */
11100         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11101                 ret = dm_update_plane_state(dc, state, plane,
11102                                             old_plane_state,
11103                                             new_plane_state,
11104                                             true,
11105                                             &lock_and_validation_needed);
11106                 if (ret) {
11107                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11108                         goto fail;
11109                 }
11110         }
11111
11112         /* Run this here since we want to validate the streams we created */
11113         ret = drm_atomic_helper_check_planes(dev, state);
11114         if (ret) {
11115                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11116                 goto fail;
11117         }
11118
11119         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11120                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11121                 if (dm_new_crtc_state->mpo_requested)
11122                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11123         }
11124
11125         /* Check cursor planes scaling */
11126         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11127                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11128                 if (ret) {
11129                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11130                         goto fail;
11131                 }
11132         }
11133
11134         if (state->legacy_cursor_update) {
11135                 /*
11136                  * This is a fast cursor update coming from the plane update
11137                  * helper, check if it can be done asynchronously for better
11138                  * performance.
11139                  */
11140                 state->async_update =
11141                         !drm_atomic_helper_async_check(dev, state);
11142
11143                 /*
11144                  * Skip the remaining global validation if this is an async
11145                  * update. Cursor updates can be done without affecting
11146                  * state or bandwidth calcs and this avoids the performance
11147                  * penalty of locking the private state object and
11148                  * allocating a new dc_state.
11149                  */
11150                 if (state->async_update)
11151                         return 0;
11152         }
11153
        /* Check scaling and underscan changes */
        /*
         * TODO: Removed scaling changes validation due to inability to commit
         * new stream into context w/o causing full reset. Need to
         * decide how to handle.
         */
11159         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11160                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11161                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11162                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11163
11164                 /* Skip any modesets/resets */
11165                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11166                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11167                         continue;
11168
                /* Skip anything that is not a scaling or underscan change */
11170                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11171                         continue;
11172
11173                 lock_and_validation_needed = true;
11174         }
11175
11176 #if defined(CONFIG_DRM_AMD_DC_DCN)
11177         /* set the slot info for each mst_state based on the link encoding format */
11178         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11179                 struct amdgpu_dm_connector *aconnector;
11180                 struct drm_connector *connector;
11181                 struct drm_connector_list_iter iter;
11182                 u8 link_coding_cap;
11183
                if (!mgr->mst_state)
11185                         continue;
11186
11187                 drm_connector_list_iter_begin(dev, &iter);
11188                 drm_for_each_connector_iter(connector, &iter) {
11189                         int id = connector->index;
11190
11191                         if (id == mst_state->mgr->conn_base_id) {
11192                                 aconnector = to_amdgpu_dm_connector(connector);
11193                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11194                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11195
11196                                 break;
11197                         }
11198                 }
                drm_connector_list_iter_end(&iter);
        }
11202 #endif
        /*
11204          * Streams and planes are reset when there are changes that affect
11205          * bandwidth. Anything that affects bandwidth needs to go through
11206          * DC global validation to ensure that the configuration can be applied
11207          * to hardware.
11208          *
         * We currently have to stall out here in atomic_check for outstanding
11210          * commits to finish in this case because our IRQ handlers reference
11211          * DRM state directly - we can end up disabling interrupts too early
11212          * if we don't.
11213          *
11214          * TODO: Remove this stall and drop DM state private objects.
11215          */
11216         if (lock_and_validation_needed) {
11217                 ret = dm_atomic_get_state(state, &dm_state);
11218                 if (ret) {
11219                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11220                         goto fail;
11221                 }
11222
11223                 ret = do_aquire_global_lock(dev, state);
11224                 if (ret) {
11225                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11226                         goto fail;
11227                 }
11228
11229 #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
                        DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
                        ret = -EINVAL;
                        goto fail;
                }
11234
11235                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11236                 if (ret) {
11237                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11238                         goto fail;
11239                 }
11240 #endif
11241
11242                 /*
11243                  * Perform validation of MST topology in the state:
11244                  * We need to perform MST atomic check before calling
11245                  * dc_validate_global_state(), or there is a chance
11246                  * to get stuck in an infinite loop and hang eventually.
11247                  */
11248                 ret = drm_dp_mst_atomic_check(state);
11249                 if (ret) {
11250                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11251                         goto fail;
11252                 }
11253                 status = dc_validate_global_state(dc, dm_state->context, true);
11254                 if (status != DC_OK) {
                        DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11256                                        dc_status_to_str(status), status);
11257                         ret = -EINVAL;
11258                         goto fail;
11259                 }
11260         } else {
11261                 /*
11262                  * The commit is a fast update. Fast updates shouldn't change
11263                  * the DC context, affect global validation, and can have their
11264                  * commit work done in parallel with other commits not touching
11265                  * the same resource. If we have a new DC context as part of
11266                  * the DM atomic state from validation we need to free it and
11267                  * retain the existing one instead.
11268                  *
11269                  * Furthermore, since the DM atomic state only contains the DC
11270                  * context and can safely be annulled, we can free the state
11271                  * and clear the associated private object now to free
11272                  * some memory and avoid a possible use-after-free later.
11273                  */
11274
11275                 for (i = 0; i < state->num_private_objs; i++) {
11276                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11277
11278                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
                                int j = state->num_private_objs - 1;
11280
11281                                 dm_atomic_destroy_state(obj,
11282                                                 state->private_objs[i].state);
11283
11284                                 /* If i is not at the end of the array then the
11285                                  * last element needs to be moved to where i was
11286                                  * before the array can safely be truncated.
11287                                  */
11288                                 if (i != j)
11289                                         state->private_objs[i] =
11290                                                 state->private_objs[j];
11291
11292                                 state->private_objs[j].ptr = NULL;
11293                                 state->private_objs[j].state = NULL;
11294                                 state->private_objs[j].old_state = NULL;
11295                                 state->private_objs[j].new_state = NULL;
11296
11297                                 state->num_private_objs = j;
11298                                 break;
11299                         }
11300                 }
11301         }
11302
11303         /* Store the overall update type for use later in atomic check. */
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11305                 struct dm_crtc_state *dm_new_crtc_state =
11306                         to_dm_crtc_state(new_crtc_state);
11307
11308                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11309                                                          UPDATE_TYPE_FULL :
11310                                                          UPDATE_TYPE_FAST;
11311         }
11312
        /* ret must be 0 (success) at this point */
11314         WARN_ON(ret);
11315
11316         trace_amdgpu_dm_atomic_check_finish(state, ret);
11317
11318         return ret;
11319
11320 fail:
11321         if (ret == -EDEADLK)
11322                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11323         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11324                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11325         else
                DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11327
11328         trace_amdgpu_dm_atomic_check_finish(state, ret);
11329
11330         return ret;
11331 }
11332
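/*
 * Read DP_DOWN_STREAM_PORT_COUNT from the sink's DPCD and report
 * whether the MSA_TIMING_PAR_IGNORED bit is set, i.e. whether the sink
 * supports variable timing without valid MSA parameters.
 */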
11333 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11334                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11335 {
11336         uint8_t dpcd_data;
11337         bool capable = false;
11338
11339         if (amdgpu_dm_connector->dc_link &&
11340                 dm_helpers_dp_read_dpcd(
11341                                 NULL,
11342                                 amdgpu_dm_connector->dc_link,
11343                                 DP_DOWN_STREAM_PORT_COUNT,
11344                                 &dpcd_data,
11345                                 sizeof(dpcd_data))) {
                capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
11347         }
11348
11349         return capable;
11350 }
11351
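/*
 * Hand one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA
 * extension block to DMUB for parsing, then decode the reply: either a
 * per-chunk ACK or, once the whole block has been sent, the AMD VSDB
 * contents carrying the FreeSync refresh rate range.
 */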
11352 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11353                 unsigned int offset,
11354                 unsigned int total_length,
11355                 uint8_t *data,
11356                 unsigned int length,
11357                 struct amdgpu_hdmi_vsdb_info *vsdb)
11358 {
11359         bool res;
11360         union dmub_rb_cmd cmd;
11361         struct dmub_cmd_send_edid_cea *input;
11362         struct dmub_cmd_edid_cea_output *output;
11363
11364         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11365                 return false;
11366
11367         memset(&cmd, 0, sizeof(cmd));
11368
11369         input = &cmd.edid_cea.data.input;
11370
11371         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11372         cmd.edid_cea.header.sub_type = 0;
11373         cmd.edid_cea.header.payload_bytes =
11374                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11375         input->offset = offset;
11376         input->length = length;
11377         input->cea_total_length = total_length;
11378         memcpy(input->payload, data, length);
11379
11380         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11381         if (!res) {
11382                 DRM_ERROR("EDID CEA parser failed\n");
11383                 return false;
11384         }
11385
11386         output = &cmd.edid_cea.data.output;
11387
11388         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11389                 if (!output->ack.success) {
11390                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11391                                         output->ack.offset);
11392                 }
11393         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11394                 if (!output->amd_vsdb.vsdb_found)
11395                         return false;
11396
11397                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11398                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11399                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11400                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11401         } else {
11402                 DRM_WARN("Unknown EDID CEA parser results\n");
11403                 return false;
11404         }
11405
11406         return true;
11407 }
11408
11409 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11410                 uint8_t *edid_ext, int len,
11411                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11412 {
11413         int i;
11414
11415         /* send extension block to DMCU for parsing */
11416         for (i = 0; i < len; i += 8) {
11417                 bool res;
11418                 int offset;
11419
                /* send 8 bytes at a time */
11421                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11422                         return false;
11423
                if (i + 8 == len) {
                        /* EDID block sent completely, expect result */
11426                         int version, min_rate, max_rate;
11427
11428                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11429                         if (res) {
11430                                 /* amd vsdb found */
11431                                 vsdb_info->freesync_supported = 1;
11432                                 vsdb_info->amd_vsdb_version = version;
11433                                 vsdb_info->min_refresh_rate_hz = min_rate;
11434                                 vsdb_info->max_refresh_rate_hz = max_rate;
11435                                 return true;
11436                         }
11437                         /* not amd vsdb */
11438                         return false;
11439                 }
11440
                /* check for ack */
11442                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11443                 if (!res)
11444                         return false;
11445         }
11446
11447         return false;
11448 }
11449
11450 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11451                 uint8_t *edid_ext, int len,
11452                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11453 {
11454         int i;
11455
        /* send extension block to DMUB for parsing */
11457         for (i = 0; i < len; i += 8) {
                /* send 8 bytes at a time */
11459                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11460                         return false;
11461         }
11462
11463         return vsdb_info->freesync_supported;
11464 }
11465
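/* Route CEA block parsing to DMUB when present, else fall back to DMCU. */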
11466 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11467                 uint8_t *edid_ext, int len,
11468                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11469 {
11470         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11471
11472         if (adev->dm.dmub_srv)
11473                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11474         else
11475                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11476 }
11477
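/*
 * Locate the CEA extension block in the EDID and scan it for the AMD
 * vendor-specific data block. Returns the extension index on success,
 * or -ENODEV if no usable VSDB is found.
 */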
11478 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11479                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11480 {
11481         uint8_t *edid_ext = NULL;
11482         int i;
11483         bool valid_vsdb_found = false;
11484
11485         /*----- drm_find_cea_extension() -----*/
11486         /* No EDID or EDID extensions */
11487         if (edid == NULL || edid->extensions == 0)
11488                 return -ENODEV;
11489
11490         /* Find CEA extension */
11491         for (i = 0; i < edid->extensions; i++) {
11492                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11493                 if (edid_ext[0] == CEA_EXT)
11494                         break;
11495         }
11496
11497         if (i == edid->extensions)
11498                 return -ENODEV;
11499
11500         /*----- cea_db_offsets() -----*/
11501         if (edid_ext[0] != CEA_EXT)
11502                 return -ENODEV;
11503
11504         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11505
11506         return valid_vsdb_found ? i : -ENODEV;
11507 }
11508
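/**
 * amdgpu_dm_update_freesync_caps - Update the connector's FreeSync state
 * @connector: The DRM connector to update
 * @edid: The monitor's EDID, or NULL
 *
 * For DP/eDP sinks the min/max refresh rates come from the EDID monitor
 * range descriptor; for HDMI sinks they come from the AMD VSDB in the
 * CEA extension block. A range wider than 10 Hz marks the connector as
 * FreeSync capable and updates its vrr_capable property.
 */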
11509 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11510                                         struct edid *edid)
11511 {
11512         int i = 0;
11513         struct detailed_timing *timing;
11514         struct detailed_non_pixel *data;
11515         struct detailed_data_monitor_range *range;
11516         struct amdgpu_dm_connector *amdgpu_dm_connector =
11517                         to_amdgpu_dm_connector(connector);
11518         struct dm_connector_state *dm_con_state = NULL;
11519         struct dc_sink *sink;
11520
11521         struct drm_device *dev = connector->dev;
11522         struct amdgpu_device *adev = drm_to_adev(dev);
11523         bool freesync_capable = false;
11524         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11525
11526         if (!connector->state) {
                DRM_ERROR("%s - Connector has no state\n", __func__);
11528                 goto update;
11529         }
11530
11531         sink = amdgpu_dm_connector->dc_sink ?
11532                 amdgpu_dm_connector->dc_sink :
11533                 amdgpu_dm_connector->dc_em_sink;
11534
11535         if (!edid || !sink) {
11536                 dm_con_state = to_dm_connector_state(connector->state);
11537
11538                 amdgpu_dm_connector->min_vfreq = 0;
11539                 amdgpu_dm_connector->max_vfreq = 0;
11540                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11541                 connector->display_info.monitor_range.min_vfreq = 0;
11542                 connector->display_info.monitor_range.max_vfreq = 0;
11543                 freesync_capable = false;
11544
11545                 goto update;
11546         }
11547
11548         dm_con_state = to_dm_connector_state(connector->state);
11549
11550         if (!adev->dm.freesync_module)
                goto update;

        if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
            sink->sink_signal == SIGNAL_TYPE_EDP) {
11556                 bool edid_check_required = false;
11557
11558                 if (edid) {
11559                         edid_check_required = is_dp_capable_without_timing_msa(
11560                                                 adev->dm.dc,
11561                                                 amdgpu_dm_connector);
11562                 }
11563
                if (edid_check_required && (edid->version > 1 ||
                   (edid->version == 1 && edid->revision > 1))) {
                        for (i = 0; i < 4; i++) {
11568                                 timing  = &edid->detailed_timings[i];
11569                                 data    = &timing->data.other_data;
11570                                 range   = &data->data.range;
11571                                 /*
11572                                  * Check if monitor has continuous frequency mode
11573                                  */
11574                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11575                                         continue;
11576                                 /*
11577                                  * Check for flag range limits only. If flag == 1 then
                                 * no additional timing information is provided.
11579                                  * Default GTF, GTF Secondary curve and CVT are not
11580                                  * supported
11581                                  */
11582                                 if (range->flags != 1)
11583                                         continue;
11584
11585                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11586                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11587                                 amdgpu_dm_connector->pixel_clock_mhz =
11588                                         range->pixel_clock_mhz * 10;
11589
11590                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11591                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11592
11593                                 break;
11594                         }
11595
                        if (amdgpu_dm_connector->max_vfreq -
                            amdgpu_dm_connector->min_vfreq > 10)
                                freesync_capable = true;
11601                 }
11602         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11603                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11604                 if (i >= 0 && vsdb_info.freesync_supported) {
11605                         timing  = &edid->detailed_timings[i];
11606                         data    = &timing->data.other_data;
11607
11608                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11609                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11610                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11611                                 freesync_capable = true;
11612
11613                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11614                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11615                 }
11616         }
11617
11618 update:
11619         if (dm_con_state)
11620                 dm_con_state->freesync_capable = freesync_capable;
11621
11622         if (connector->vrr_capable_property)
11623                 drm_connector_set_vrr_capable_property(connector,
11624                                                        freesync_capable);
11625 }
11626
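/*
 * Apply the force_timing_sync flag to every stream in the current DC
 * state and retrigger per-frame CRTC master synchronization, all under
 * the dc lock.
 */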
11627 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11628 {
11629         struct amdgpu_device *adev = drm_to_adev(dev);
11630         struct dc *dc = adev->dm.dc;
11631         int i;
11632
11633         mutex_lock(&adev->dm.dc_lock);
11634         if (dc->current_state) {
11635                 for (i = 0; i < dc->current_state->stream_count; ++i)
11636                         dc->current_state->streams[i]
11637                                 ->triggered_crtc_reset.enabled =
11638                                 adev->dm.force_timing_sync;
11639
11640                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11641                 dc_trigger_sync(dc, dc->current_state);
11642         }
11643         mutex_unlock(&adev->dm.dc_lock);
11644 }
11645
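/**
 * dm_write_reg_func - MMIO register write callback used by DC
 * @ctx: DC context, which supplies the CGS device handle
 * @address: register offset to write
 * @value: value to write
 * @func_name: name of the caller (currently unused)
 *
 * Writes the register through CGS and records the access via the
 * amdgpu_dc_wreg trace point.
 */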
11646 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11647                        uint32_t value, const char *func_name)
11648 {
11649 #ifdef DM_CHECK_ADDR_0
11650         if (address == 0) {
11651                 DC_ERR("invalid register write; address = 0\n");
11652                 return;
11653         }
11654 #endif
11655         cgs_write_register(ctx->cgs_device, address, value);
11656         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11657 }
11658
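/**
 * dm_read_reg_func - MMIO register read callback used by DC
 * @ctx: DC context, which supplies the CGS device handle
 * @address: register offset to read
 * @func_name: name of the caller (currently unused)
 *
 * Rejects reads while a DMUB register-offload gather is in progress,
 * otherwise reads the register through CGS and records the access via
 * the amdgpu_dc_rreg trace point.
 *
 * Return: the register value, or 0 if the read was rejected.
 */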
11659 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11660                           const char *func_name)
11661 {
11662         uint32_t value;
11663 #ifdef DM_CHECK_ADDR_0
11664         if (address == 0) {
11665                 DC_ERR("invalid register read; address = 0\n");
11666                 return 0;
11667         }
11668 #endif
11669
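        /*
         * Register reads cannot be serviced while a DMUB register-offload
         * gather is in progress, unless the sequence is a burst write.
         */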
11670         if (ctx->dmub_srv &&
11671             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11672             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11673                 ASSERT(false);
11674                 return 0;
11675         }
11676
11677         value = cgs_read_register(ctx->cgs_device, address);
11678
11679         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11680
11681         return value;
11682 }
11683
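/**
 * amdgpu_dm_set_dmub_async_sync_status - translate a DMUB async-to-sync
 * status into a return value and operation result
 * @is_cmd_aux: true for AUX transfers, false for SET_CONFIG requests
 * @ctx: DC context
 * @status_type: one of the DMUB_ASYNC_TO_SYNC_ACCESS_* codes
 * @operation_result: filled with an AUX_RET_* or SET_CONFIG_* code
 *
 * Return: the AUX reply length for a successful AUX transfer, 0 for a
 * successful SET_CONFIG, -1 otherwise.
 */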
11684 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11685                                                 struct dc_context *ctx,
11686                                                 uint8_t status_type,
11687                                                 uint32_t *operation_result)
11688 {
11689         struct amdgpu_device *adev = ctx->driver_context;
11690         int return_status = -1;
11691         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11692
11693         if (is_cmd_aux) {
11694                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11695                         return_status = p_notify->aux_reply.length;
11696                         *operation_result = p_notify->result;
11697                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11698                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11699                 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11700                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11701                 } else {
11702                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11703                 }
11704         } else {
11705                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11706                         return_status = 0;
11707                         *operation_result = p_notify->sc_status;
11708                 } else {
11709                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11710                 }
11711         }
11712
11713         return return_status;
11714 }
11715
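/**
 * amdgpu_dm_process_dmub_aux_transfer_sync - issue a DMUB AUX or SET_CONFIG
 * request and wait for the reply
 * @is_cmd_aux: true to issue an AUX transfer, false for SET_CONFIG
 * @ctx: DC context
 * @link_index: index of the link the request is addressed to
 * @cmd_payload: struct aux_payload or struct set_config_cmd_payload
 * @operation_result: filled with an AUX_RET_* or SET_CONFIG_* code
 *
 * Kicks off the asynchronous DMUB request, waits up to ten seconds for
 * the completion signalled by the DMUB notification handler, and copies
 * any AUX reply data back into the payload.
 */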
11716 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11717         unsigned int link_index, void *cmd_payload, void *operation_result)
11718 {
11719         struct amdgpu_device *adev = ctx->driver_context;
11720         int ret = 0;
11721
11722         if (is_cmd_aux) {
11723                 dc_process_dmub_aux_transfer_async(ctx->dc,
11724                         link_index, (struct aux_payload *)cmd_payload);
11725         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11726                                         (struct set_config_cmd_payload *)cmd_payload,
11727                                         adev->dm.dmub_notify)) {
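                /*
                 * dc_process_dmub_set_config_async() returning true means
                 * DMUB completed the request synchronously and the
                 * notification is already populated, so report success
                 * without waiting for the completion.
                 */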
11728                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11729                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11730                                         (uint32_t *)operation_result);
11731         }
11732
11733         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11734         if (ret == 0) {
11735                 DRM_ERROR("wait_for_completion_timeout timed out!");
11736                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11737                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11738                                 (uint32_t *)operation_result);
11739         }
11740
11741         if (is_cmd_aux) {
11742                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11743                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11744
11745                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11746                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11747                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11748                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11749                                        adev->dm.dmub_notify->aux_reply.length);
11750                         }
11751                 }
11752         }
11753
11754         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11755                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11756                         (uint32_t *)operation_result);
11757 }
11758
11759 /*
11760  * Check whether seamless boot is supported.
11761  *
11762  * So far we only support seamless boot on CHIP_VANGOGH.
11763  * If everything goes well, we may consider expanding
11764  * seamless boot to other ASICs.
11765  */
11766 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11767 {
11768         switch (adev->asic_type) {
11769         case CHIP_VANGOGH:
11770                 if (!adev->mman.keep_stolen_vga_memory)
11771                         return true;
11772                 break;
11773         default:
11774                 break;
11775         }
11776
11777         return false;
11778 }
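
/*
 * Illustrative usage only (the exact flag names below are an assumption,
 * not taken from this file): DC init code would gate seamless-boot
 * requests on this check, e.g.
 *
 *	if (check_seamless_boot_capability(adev))
 *		init_data.flags.seamless_boot_edp_requested = true;
 */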