/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include "i2caux_interface.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);

#define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
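
/*
 * Rough layout of a signed DMUB firmware image as consumed by
 * dm_dmub_hw_init() below. This is a sketch inferred from the offset math in
 * that function, not an authoritative format description:
 *
 *   ucode_array_offset_bytes
 *   v
 *   | PSP header (0x100) | inst_const ... | PSP footer (0x100) | bss/data |
 *
 * fw_inst_const starts past the PSP header, and fw_inst_const_size subtracts
 * both the header and the footer from hdr->inst_const_bytes.
 */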

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
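
/*
 * At a glance (an informal sketch of the same relationship):
 *
 *   userspace <-> DRM core <-> amdgpu_dm <-> DC <-> display hardware
 *
 * DM translates in both directions: DRM atomic/legacy requests into DC
 * structures, and DC results back into DRM/KMS terms.
 */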

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
        switch (link->dpcd_caps.dongle_type) {
        case DISPLAY_DONGLE_NONE:
                return DRM_MODE_SUBCONNECTOR_Native;
        case DISPLAY_DONGLE_DP_VGA_CONVERTER:
                return DRM_MODE_SUBCONNECTOR_VGA;
        case DISPLAY_DONGLE_DP_DVI_CONVERTER:
        case DISPLAY_DONGLE_DP_DVI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_DVID;
        case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
        case DISPLAY_DONGLE_DP_HDMI_DONGLE:
                return DRM_MODE_SUBCONNECTOR_HDMIA;
        case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
        default:
                return DRM_MODE_SUBCONNECTOR_Unknown;
        }
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
        struct dc_link *link = aconnector->dc_link;
        struct drm_connector *connector = &aconnector->base;
        enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        if (aconnector->dc_sink)
                subconnector = get_subconnector_type(link);

        drm_object_property_set_value(&connector->base,
                        connector->dev->mode_config.dp_subconnector_property,
                        subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
                                    uint32_t link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
                                  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
                                 struct drm_crtc_state *new_crtc_state);
/**
 * dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
 * @adev: desired amdgpu device
 * @crtc: index of the CRTC to get the counter from
 *
 * Return: the vertical blank counter, or 0 if @crtc is out of range or has
 * no stream attached.
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
        }
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
{
        uint32_t v_blank_start, v_blank_end, h_position, v_position;

        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
        else {
                struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

                if (acrtc->dm_irq_params.stream == NULL) {
                        DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
                                  crtc);
                        return 0;
                }

                /*
                 * TODO rework base driver to use values directly.
                 * for now parse it back into reg-format
                 */
                dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
                                         &v_blank_start,
                                         &v_blank_end,
                                         &h_position,
                                         &v_position);

                *position = v_position | (h_position << 16);
                *vbl = v_blank_start | (v_blank_end << 16);
        }

        return 0;
}

static bool dm_is_idle(void *handle)
{
        /* XXX todo */
        return true;
}

static int dm_wait_for_idle(void *handle)
{
        /* XXX todo */
        return 0;
}

static bool dm_check_soft_reset(void *handle)
{
        return false;
}

static int dm_soft_reset(void *handle)
{
        /* XXX todo */
        return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
                     int otg_inst)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);

                if (amdgpu_crtc->otg_inst == otg_inst)
                        return amdgpu_crtc;
        }

        return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
                                              struct dm_crtc_state *new_state)
{
        if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
                return true;
        else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
                return true;
        else
                return false;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        uint32_t vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;

        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
                DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
                             amdgpu_crtc->pflip_status,
                             AMDGPU_FLIP_SUBMITTED,
                             amdgpu_crtc->crtc_id,
                             amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }

        /* page flip completed. */
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;

        WARN_ON(!e);

        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
            !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
                                      &v_blank_end, &hpos, &vpos) ||
            (vpos < v_blank_start)) {
                /* Update to correct count and vblank timestamp if racing with
                 * vblank irq. This also updates to the correct vblank timestamp
                 * even in VRR mode, as scanout is past the front-porch atm.
                 */
                drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

                /* Wake up userspace by sending the pageflip event with proper
                 * count and timestamp of vblank of flip completion.
                 */
                if (e) {
                        drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

                        /* Event sent, so done with vblank for this flip */
                        drm_crtc_vblank_put(&amdgpu_crtc->base);
                }
        } else if (e) {
                /* VRR active and inside front-porch: vblank count and
                 * timestamp for pageflip event will only be up to date after
                 * drm_crtc_handle_vblank() has been executed from late vblank
                 * irq handler after start of back-porch (vline 0). We queue the
                 * pageflip event for send-out by drm_crtc_handle_vblank() with
                 * updated timestamp and count, once it runs after us.
                 *
                 * We need to open-code this instead of using the helper
                 * drm_crtc_arm_vblank_event(), as that helper would
                 * call drm_crtc_accurate_vblank_count(), which we must
                 * not call in VRR mode while we are in front-porch!
                 */

                /* sequence will be replaced by real count during send-out. */
                e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
                e->pipe = amdgpu_crtc->crtc_id;

                list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
                e = NULL;
        }

        /* Keep track of vblank of this flip for flip throttling. We use the
         * cooked hw counter, as that one incremented at start of this vblank
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
        amdgpu_crtc->dm_irq_params.last_flip_vblank =
                amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

        DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
                     amdgpu_crtc->crtc_id, amdgpu_crtc,
                     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        struct drm_device *drm_dev;
        struct drm_vblank_crtc *vblank;
        ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
                frame_duration_ns = vblank->time - previous_timestamp;

                if (frame_duration_ns > 0) {
                        trace_amdgpu_refresh_rate_track(acrtc->base.index,
                                                frame_duration_ns,
                                                ktime_divns(NSEC_PER_SEC, frame_duration_ns));
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }

                DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);

                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
                 * while now done after front-porch. This will also deliver
                 * page-flip completion events that have been queued to us
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
                        drm_crtc_handle_vblank(&acrtc->base);

                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
                            adev->family < AMDGPU_FAMILY_AI) {
                                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                mod_freesync_handle_v_update(
                                    adev->dm.freesync_module,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params);

                                dc_stream_adjust_vmin_vmax(
                                    adev->dm.dc,
                                    acrtc->dm_irq_params.stream,
                                    &acrtc->dm_irq_params.vrr_params.adjust);
                                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                        }
                }
        }
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
        unsigned long flags;
        int vrr_active;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
        if (!acrtc)
                return;

        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);

        /*
         * Core vblank handling at start of front-porch is only possible
         * in non-vrr mode, as only there vblank timestamping will give
         * valid results while done in front-porch. Otherwise defer it
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
                drm_crtc_handle_vblank(&acrtc->base);

        /*
         * Following stuff must happen at start of vblank, for crc
         * computation and below-the-range btr support in vrr mode.
         */
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

        /* BTR updates need to happen before VUPDATE on Vega and above. */
        if (adev->family < AMDGPU_FAMILY_AI)
                return;

        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

        if (acrtc->dm_irq_params.stream &&
            acrtc->dm_irq_params.vrr_params.supported &&
            acrtc->dm_irq_params.freesync_config.state ==
                    VRR_STATE_ACTIVE_VARIABLE) {
                mod_freesync_handle_v_update(adev->dm.freesync_module,
                                             acrtc->dm_irq_params.stream,
                                             &acrtc->dm_irq_params.vrr_params);

                dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
                                           &acrtc->dm_irq_params.vrr_params.adjust);
        }

        /*
         * If there aren't any active_planes then DCH HUBP may be clock-gated.
         * In that case, pageflip completion interrupts won't fire and pageflip
         * completion events won't get delivered. Prevent this by sending
         * pending pageflip events from here if a flip is still pending.
         *
         * If any planes are enabled, use dm_pflip_high_irq() instead, to
         * avoid race conditions between flip programming and completion,
         * which could cause too early flip completion events.
         */
        if (adev->family >= AMDGPU_FAMILY_RV &&
            acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
            acrtc->dm_irq_params.active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
                        drm_crtc_vblank_put(&acrtc->base);
                }
                acrtc->pflip_status = AMDGPU_FLIP_NONE;
        }

        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;

        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

        if (!acrtc)
                return;

        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * DMUB AUX or SET_CONFIG command completion processing callback.
 * Copies the DMUB notification to DM, to be read by the thread that issued
 * the AUX command, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
                                        struct dmub_notification *notify)
{
        if (adev->dm.dmub_notify)
                memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
        if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
                complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * DMUB HPD interrupt processing callback. Gets the display index from the
 * link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
                              struct dmub_notification *notify)
{
        struct amdgpu_dm_connector *aconnector;
        struct amdgpu_dm_connector *hpd_aconnector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
        uint8_t link_index = 0;
        struct drm_device *dev;

        if (adev == NULL)
                return;

        if (notify == NULL) {
                DRM_ERROR("DMUB HPD callback notification was NULL");
                return;
        }

        /* dc->links[] holds link_count entries, so an index equal to
         * link_count is already out of range.
         */
        if (notify->link_index >= adev->dm.dc->link_count) {
                DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
                return;
        }

        link_index = notify->link_index;
        link = adev->dm.dc->links[link_index];
        dev = adev->dm.ddev;

        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
                        DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
                        hpd_aconnector = aconnector;
                        break;
                }
        }
        drm_connector_list_iter_end(&iter);

        if (hpd_aconnector) {
                if (notify->type == DMUB_NOTIFICATION_HPD)
                        handle_hpd_irq_helper(hpd_aconnector);
                else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
                        handle_hpd_rx_irq(hpd_aconnector);
        }
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether the callback processing should be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
                                          enum dmub_notification_type type,
                                          dmub_notify_interrupt_callback_t callback,
                                          bool dmub_int_thread_offload)
{
        if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
                adev->dm.dmub_callback[type] = callback;
                adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
        } else
                return false;

        return true;
}
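
/*
 * Illustrative use only (the actual registrations are made elsewhere in the
 * driver during init), assuming DMUB notifications are enabled:
 *
 *      if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *                                         dmub_hpd_callback, true))
 *              DRM_ERROR("amdgpu: fail to register dmub hpd callback");
 *
 * With the offload flag set, the callback runs from the delayed HPD
 * workqueue rather than directly from the outbox interrupt handler.
 */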

static void dm_handle_hpd_work(struct work_struct *work)
{
        struct dmub_hpd_work *dmub_hpd_wrk;

        dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

        if (!dmub_hpd_wrk->dmub_notify) {
                DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
                return;
        }

        if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
                dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
                                                                                      dmub_hpd_wrk->dmub_notify);
        }

        kfree(dmub_hpd_wrk->dmub_notify);
        kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications to their
 * registered callbacks and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
        struct dmub_notification notify;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
        uint32_t count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;

        if (dc_enable_dmub_notifications(adev->dm.dc) &&
            irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

                do {
                        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
                        if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
                                DRM_ERROR("DM: notify type %d invalid!", notify.type);
                                continue;
                        }
                        if (!dm->dmub_callback[notify.type]) {
                                DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
                                continue;
                        }
                        if (dm->dmub_thread_offload[notify.type] == true) {
                                dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
                                if (!dmub_hpd_wrk) {
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
                                dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
                                memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
                                        if (plink) {
                                                plink->hpd_status =
                                                        notify.hpd_status == DP_HPD_PLUG;
                                        }
                                }
                                queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
                        } else {
                                dm->dmub_callback[notify.type](adev, &notify);
                        }
                } while (notify.pending_notification);
        }

        do {
                if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
                        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                                         entry.param0, entry.param1);

                        DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
                                         entry.trace_code, entry.tick_count, entry.param0, entry.param1);
                } else
                        break;

                count++;

        } while (count <= DMUB_TRACE_MAX_READ);

        if (count > DMUB_TRACE_MAX_READ)
                DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
                                    enum amd_clockgating_state state)
{
        return 0;
}

static int dm_set_powergating_state(void *handle,
                                    enum amd_powergating_state state)
{
        return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_compressor_info *compressor = &adev->dm.compressor;
        struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
        struct drm_display_mode *mode;
        unsigned long max_size = 0;

        if (adev->dm.dc->fbc_compressor == NULL)
                return;

        if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;

        if (compressor->bo_ptr)
                return;

        list_for_each_entry(mode, &connector->modes, head) {
                if (max_size < mode->htotal * mode->vtotal)
                        max_size = mode->htotal * mode->vtotal;
        }

        if (max_size) {
                int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
                                                &compressor->gpu_addr, &compressor->cpu_addr);

                if (r)
                        DRM_ERROR("DM: Failed to initialize FBC\n");
                else {
                        adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
                        DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
                }
        }
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
                                             int pipe, bool *enabled,
                                             unsigned char *buf, int max_bytes)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct amdgpu_dm_connector *aconnector;
        int ret = 0;

        *enabled = false;

        mutex_lock(&adev->dm.audio_lock);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->audio_inst != port)
                        continue;

                *enabled = true;
                ret = drm_eld_size(connector->eld);
                memcpy(buf, connector->eld, min(max_bytes, ret));

                break;
        }
        drm_connector_list_iter_end(&conn_iter);

        mutex_unlock(&adev->dm.audio_lock);

        DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

        return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
        .get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
                                          struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = &amdgpu_dm_audio_component_ops;
        acomp->dev = kdev;
        adev->dm.audio_component = acomp;

        return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
                                             struct device *hda_kdev, void *data)
{
        struct drm_device *dev = dev_get_drvdata(kdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_audio_component *acomp = data;

        acomp->ops = NULL;
        acomp->dev = NULL;
        adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
        .bind   = amdgpu_dm_audio_component_bind,
        .unbind = amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
        int i, ret;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].id =
                        adev->dm.dc->res_pool->audios[i]->inst;
                adev->mode_info.audio.pin[i].offset = 0;
        }

        ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
        if (ret < 0)
                return ret;

        adev->dm.audio_registered = true;

        return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        if (adev->dm.audio_registered) {
                component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
                adev->dm.audio_registered = false;
        }

        /* TODO: Disable audio? */

        adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
        struct drm_audio_component *acomp = adev->dm.audio_component;

        if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
                DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

                acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
                                                 pin, -1);
        }
}

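/*
 * Load the DMUB firmware regions into framebuffer memory and bring up the
 * DMUB hardware service (a summary of the function below; the inline
 * comments carry the authoritative details). Returns 0 when DMUB is either
 * unsupported or successfully initialized, negative errno otherwise.
 */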
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
        const struct dmcub_firmware_header_v1_0 *hdr;
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;

        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
                return 0;

        if (!fb_info) {
                DRM_ERROR("No framebuffer info for DMUB service.\n");
                return -EINVAL;
        }

        if (!dmub_fw) {
                /* Firmware required for DMUB support. */
                DRM_ERROR("No firmware provided for DMUB.\n");
                return -EINVAL;
        }

        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
                return -EINVAL;
        }

        if (!has_hw_support) {
                DRM_INFO("DMUB unsupported on ASIC\n");
                return 0;
        }

        /* Reset DMCUB if it was previously running - before we overwrite its memory. */
        status = dmub_srv_hw_reset(dmub_srv);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Error resetting DMUB HW: %d\n", status);

        hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

        fw_inst_const = dmub_fw->data +
                        le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                        PSP_HEADER_BYTES;

        fw_bss_data = dmub_fw->data +
                      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                      le32_to_cpu(hdr->inst_const_bytes);

        /* Copy firmware and bios info into FB memory. */
        fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
                             PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

        /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
         * amdgpu_ucode_init_single_fw will load dmub firmware
         * fw_inst_const part to cw0; otherwise, the firmware back door load
         * will be done by dm_dmub_hw_init
         */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
                       fw_inst_const_size);
        }

        if (fw_bss_data_size)
                memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
                       fw_bss_data, fw_bss_data_size);

        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);

        /* Reset regions that need to be reset. */
        memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

        memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

        memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
               fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;

        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                hw_params.load_inst_const = true;

        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;

        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];

        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
        default:
                break;
        }

        status = dmub_srv_hw_init(dmub_srv, &hw_params);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error initializing DMUB HW: %d\n", status);
                return -EINVAL;
        }

        /* Wait for firmware load to finish. */
        status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

        /* Init DMCU and ABM if available. */
        if (dmcu && abm) {
                dmcu->funcs->dmcu_init(dmcu);
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }

        if (!adev->dm.dc->ctx->dmub_srv)
                adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
        }

        DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
                 adev->dm.dmcub_fw_version);

        return 0;
}

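/*
 * On resume, skip the full DMUB bring-up if the hardware kept its init
 * state, and only wait for the firmware auto-load to complete (a summary of
 * the logic below).
 */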
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
        struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
        enum dmub_status status;
        bool init;

        if (!dmub_srv) {
                /* DMUB isn't supported on the ASIC. */
                return;
        }

        status = dmub_srv_is_hw_init(dmub_srv, &init);
        if (status != DMUB_STATUS_OK)
                DRM_WARN("DMUB hardware init check failed: %d\n", status);

        if (status == DMUB_STATUS_OK && init) {
                /* Wait for firmware load to finish. */
                status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
                if (status != DMUB_STATUS_OK)
                        DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
        } else {
                /* Perform the full hardware initialization. */
                dm_dmub_hw_init(adev);
        }
}

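/*
 * Note on the shifts below (derived from how the values are consumed, and
 * offered as a reader aid rather than from register documentation): the
 * system aperture low/high addresses are kept in 256KB units (>> 18), the
 * AGP base/bot/top in 16MB units (>> 24), and the GART page table addresses
 * in 4KB pages (>> 12), with address bits 47:44 carried in the *.high_part
 * words.
 */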
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
        uint64_t pt_base;
        uint32_t logical_addr_low;
        uint32_t logical_addr_high;
        uint32_t agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

        memset(pa_config, 0, sizeof(*pa_config));

        logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
        pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

        if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                /*
                 * Raven2 has a HW issue that it is unable to use the vram which
                 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
                 * workaround that increases the system aperture high address
                 * (by adding 1) to get rid of the VM fault and hardware hang.
                 */
                logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
        else
                logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

        agp_base = 0;
        agp_bot = adev->gmc.agp_start >> 24;
        agp_top = adev->gmc.agp_end >> 24;

        page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
        page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
        page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
        page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
        page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
        page_table_base.low_part = lower_32_bits(pt_base);

        pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
        pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

        pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
        pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
        pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;

        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

        pa_config->is_hvm_enabled = 0;
}

static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /* Control PSR based on vblank requirements from OS */
        if (vblank_work->stream && vblank_work->stream->link) {
                if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
        struct hpd_rx_irq_offload_work *offload_work;
        struct amdgpu_dm_connector *aconnector;
        struct dc_link *dc_link;
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;

        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;

        if (!aconnector) {
                DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
                goto skip;
        }

        adev = drm_to_adev(aconnector->base.dev);
        dc_link = aconnector->dc_link;

        mutex_lock(&aconnector->hpd_lock);
        if (!dc_link_detect_sink(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);

        if (new_connection_type == dc_connection_none)
                goto skip;

        if (amdgpu_in_reset(adev))
                goto skip;

        mutex_lock(&adev->dm.dc_lock);
        if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
                dc_link_dp_handle_automated_test(dc_link);
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
                        hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
                dc_link_dp_handle_link_loss(dc_link);
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
        }
        mutex_unlock(&adev->dm.dc_lock);

skip:
        kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
        int max_caps = dc->caps.max_links;
        int i = 0;
        struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

        hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

        if (!hpd_rx_offload_wq)
                return NULL;

        for (i = 0; i < max_caps; i++) {
                hpd_rx_offload_wq[i].wq =
                                    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

                if (hpd_rx_offload_wq[i].wq == NULL) {
                        DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
                        goto out_err;
                }

                spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
        }

        return hpd_rx_offload_wq;

out_err:
        /* Tear down the workqueues created so far so they aren't leaked. */
        while (--i >= 0)
                destroy_workqueue(hpd_rx_offload_wq[i].wq);
        kfree(hpd_rx_offload_wq);
        return NULL;
}
1353
1354 struct amdgpu_stutter_quirk {
1355         u16 chip_vendor;
1356         u16 chip_device;
1357         u16 subsys_vendor;
1358         u16 subsys_device;
1359         u8 revision;
1360 };
1361
1362 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1363         /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1364         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1365         { 0, 0, 0, 0, 0 },
1366 };
1367
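/* Match the PCI IDs of @pdev against the stutter quirk list above. */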
1368 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1369 {
1370         const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1371
1372         while (p && p->chip_device != 0) {
1373                 if (pdev->vendor == p->chip_vendor &&
1374                     pdev->device == p->chip_device &&
1375                     pdev->subsystem_vendor == p->subsys_vendor &&
1376                     pdev->subsystem_device == p->subsys_device &&
1377                     pdev->revision == p->revision) {
1378                         return true;
1379                 }
1380                 ++p;
1381         }
1382         return false;
1383 }
1384
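/*
 * Top-level DM initialization: set up locks and IRQ handling, create the DC
 * instance and initialize DMUB hardware, allocate the HPD RX offload queues
 * and helper modules (freesync, color management, HDCP), then register the
 * DRM-facing state (connectors, vblank support, cursor limits).
 */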
1385 static int amdgpu_dm_init(struct amdgpu_device *adev)
1386 {
1387         struct dc_init_data init_data;
1388 #ifdef CONFIG_DRM_AMD_DC_HDCP
1389         struct dc_callback_init init_params;
1390 #endif
1391         int r;
1392
1393         adev->dm.ddev = adev_to_drm(adev);
1394         adev->dm.adev = adev;
1395
1396         /* Zero all the fields */
1397         memset(&init_data, 0, sizeof(init_data));
1398 #ifdef CONFIG_DRM_AMD_DC_HDCP
1399         memset(&init_params, 0, sizeof(init_params));
1400 #endif
1401
1402         mutex_init(&adev->dm.dc_lock);
1403         mutex_init(&adev->dm.audio_lock);
1404         spin_lock_init(&adev->dm.vblank_lock);
1405
1406         if (amdgpu_dm_irq_init(adev)) {
1407                 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1408                 goto error;
1409         }
1410
1411         init_data.asic_id.chip_family = adev->family;
1412
1413         init_data.asic_id.pci_revision_id = adev->pdev->revision;
1414         init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1415         init_data.asic_id.chip_id = adev->pdev->device;
1416
1417         init_data.asic_id.vram_width = adev->gmc.vram_width;
1418         /* TODO: initialize init_data.asic_id.vram_type here */
1419         init_data.asic_id.atombios_base_address =
1420                 adev->mode_info.atom_context->bios;
1421
1422         init_data.driver = adev;
1423
1424         adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1425
1426         if (!adev->dm.cgs_device) {
1427                 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1428                 goto error;
1429         }
1430
1431         init_data.cgs_device = adev->dm.cgs_device;
1432
1433         init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1434
1435         switch (adev->ip_versions[DCE_HWIP][0]) {
1436         case IP_VERSION(2, 1, 0):
1437                 switch (adev->dm.dmcub_fw_version) {
1438                 case 0: /* development */
1439                 case 0x1: /* linux-firmware.git hash 6d9f399 */
1440                 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1441                         init_data.flags.disable_dmcu = false;
1442                         break;
1443                 default:
1444                         init_data.flags.disable_dmcu = true;
1445                 }
1446                 break;
1447         case IP_VERSION(2, 0, 3):
1448                 init_data.flags.disable_dmcu = true;
1449                 break;
1450         default:
1451                 break;
1452         }
1453
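        /*
         * Determine whether scatter/gather display (gpu_vm_support) can be
         * enabled for this ASIC; see the per-chip and per-IP cases below.
         */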
1454         switch (adev->asic_type) {
1455         case CHIP_CARRIZO:
1456         case CHIP_STONEY:
1457                 init_data.flags.gpu_vm_support = true;
1458                 break;
1459         default:
1460                 switch (adev->ip_versions[DCE_HWIP][0]) {
1461                 case IP_VERSION(1, 0, 0):
1462                 case IP_VERSION(1, 0, 1):
1463                         /* enable S/G on PCO and RV2 */
1464                         if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1465                             (adev->apu_flags & AMD_APU_IS_PICASSO))
1466                                 init_data.flags.gpu_vm_support = true;
1467                         break;
1468                 case IP_VERSION(2, 1, 0):
1469                 case IP_VERSION(3, 0, 1):
1470                 case IP_VERSION(3, 1, 2):
1471                 case IP_VERSION(3, 1, 3):
1472                 case IP_VERSION(3, 1, 5):
1473                 case IP_VERSION(3, 1, 6):
1474                         init_data.flags.gpu_vm_support = true;
1475                         break;
1476                 default:
1477                         break;
1478                 }
1479                 break;
1480         }
1481
1482         if (init_data.flags.gpu_vm_support)
1483                 adev->mode_info.gpu_vm_support = true;
1484
1485         if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1486                 init_data.flags.fbc_support = true;
1487
1488         if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1489                 init_data.flags.multi_mon_pp_mclk_switch = true;
1490
1491         if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1492                 init_data.flags.disable_fractional_pwm = true;
1493
1494         if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1495                 init_data.flags.edp_no_power_sequencing = true;
1496
1497         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1498                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1499         if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1500                 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1501
1502         init_data.flags.seamless_boot_edp_requested = false;
1503
1504         if (check_seamless_boot_capability(adev)) {
1505                 init_data.flags.seamless_boot_edp_requested = true;
1506                 init_data.flags.allow_seamless_boot_optimization = true;
1507                 DRM_INFO("Seamless boot condition check passed\n");
1508         }
1509
1510         INIT_LIST_HEAD(&adev->dm.da_list);
1511         /* Display Core create. */
1512         adev->dm.dc = dc_create(&init_data);
1513
1514         if (adev->dm.dc) {
1515                 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1516         } else {
1517                 DRM_ERROR("Display Core failed to initialize with v%s!\n", DC_VER);
1518                 goto error;
1519         }
1520
1521         if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1522                 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1523                 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1524         }
1525
1526         if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1527                 adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
1528         if (dm_should_disable_stutter(adev->pdev))
1529                 adev->dm.dc->debug.disable_stutter = true;
1530
1531         if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1532                 adev->dm.dc->debug.disable_stutter = true;
1533
1534         if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1535                 adev->dm.dc->debug.disable_dsc = true;
1536                 adev->dm.dc->debug.disable_dsc_edp = true;
1537         }
1538
1539         if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1540                 adev->dm.dc->debug.disable_clock_gate = true;
1541
1542         r = dm_dmub_hw_init(adev);
1543         if (r) {
1544                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1545                 goto error;
1546         }
1547
1548         dc_hardware_init(adev->dm.dc);
1549
1550         adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1551         if (!adev->dm.hpd_rx_offload_wq) {
1552                 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1553                 goto error;
1554         }
1555
1556         if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1557                 struct dc_phy_addr_space_config pa_config;
1558
1559                 mmhub_read_system_context(adev, &pa_config);
1560
1561                 /* Call the DC init_memory func */
1562                 dc_setup_system_context(adev->dm.dc, &pa_config);
1563         }
1564
1565         adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1566         if (!adev->dm.freesync_module) {
1567                 DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
1568         } else {
1569                 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1570                                  adev->dm.freesync_module);
1571         }
1572
1573         amdgpu_dm_init_color_mod();
1574
1575         if (adev->dm.dc->caps.max_links > 0) {
1576                 adev->dm.vblank_control_workqueue =
1577                         create_singlethread_workqueue("dm_vblank_control_workqueue");
1578                 if (!adev->dm.vblank_control_workqueue)
1579                         DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1580         }
1581
1582 #ifdef CONFIG_DRM_AMD_DC_HDCP
1583         if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1584                 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1585
1586                 if (!adev->dm.hdcp_workqueue)
1587                         DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1588                 else
1589                         DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1590
1591                 dc_init_callbacks(adev->dm.dc, &init_params);
1592         }
1593 #endif
1594 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1595         adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1596 #endif
1597         if (dc_enable_dmub_notifications(adev->dm.dc)) {
1598                 init_completion(&adev->dm.dmub_aux_transfer_done);
1599                 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1600                 if (!adev->dm.dmub_notify) {
1601                         DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n");
1602                         goto error;
1603                 }
1604
1605                 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1606                 if (!adev->dm.delayed_hpd_wq) {
1607                         DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1608                         goto error;
1609                 }
1610
1611                 amdgpu_dm_outbox_init(adev);
1612                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1613                         dmub_aux_setconfig_callback, false)) {
1614                         DRM_ERROR("amdgpu: failed to register DMUB AUX callback\n");
1615                         goto error;
1616                 }
1617                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1618                         DRM_ERROR("amdgpu: failed to register DMUB HPD callback\n");
1619                         goto error;
1620                 }
1621                 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1622                         DRM_ERROR("amdgpu: failed to register DMUB HPD IRQ callback\n");
1623                         goto error;
1624                 }
1625         }
1626
1627         if (amdgpu_dm_initialize_drm_device(adev)) {
1628                 DRM_ERROR(
1629                 "amdgpu: failed to initialize sw for display support.\n");
1630                 goto error;
1631         }
1632
1633         /* create fake encoders for MST */
1634         dm_dp_create_fake_mst_encoders(adev);
1635
1636         /* TODO: Add_display_info? */
1637
1638         /* TODO use dynamic cursor width */
1639         adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1640         adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1641
1642         if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1643                 DRM_ERROR("amdgpu: failed to initialize vblank support.\n");
1645                 goto error;
1646         }
1647
1649         DRM_DEBUG_DRIVER("KMS initialized.\n");
1650
1651         return 0;
1652 error:
1653         amdgpu_dm_fini(adev);
1654
1655         return -EINVAL;
1656 }
1657
1658 static int amdgpu_dm_early_fini(void *handle)
1659 {
1660         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1661
1662         amdgpu_dm_audio_fini(adev);
1663
1664         return 0;
1665 }
1666
1667 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1668 {
1669         int i;
1670
1671         if (adev->dm.vblank_control_workqueue) {
1672                 destroy_workqueue(adev->dm.vblank_control_workqueue);
1673                 adev->dm.vblank_control_workqueue = NULL;
1674         }
1675
1676         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1677                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1678         }
1679
1680         amdgpu_dm_destroy_drm_device(&adev->dm);
1681
1682 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1683         if (adev->dm.crc_rd_wrk) {
1684                 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1685                 kfree(adev->dm.crc_rd_wrk);
1686                 adev->dm.crc_rd_wrk = NULL;
1687         }
1688 #endif
1689 #ifdef CONFIG_DRM_AMD_DC_HDCP
1690         if (adev->dm.hdcp_workqueue) {
1691                 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1692                 adev->dm.hdcp_workqueue = NULL;
1693         }
1694
1695         if (adev->dm.dc)
1696                 dc_deinit_callbacks(adev->dm.dc);
1697 #endif
1698
        /* adev->dm.dc may be NULL if amdgpu_dm_init() failed before dc_create(). */
        if (adev->dm.dc)
1699                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1700
1701         if (adev->dm.dc && dc_enable_dmub_notifications(adev->dm.dc)) {
1702                 kfree(adev->dm.dmub_notify);
1703                 adev->dm.dmub_notify = NULL;
1704                 destroy_workqueue(adev->dm.delayed_hpd_wq);
1705                 adev->dm.delayed_hpd_wq = NULL;
1706         }
1707
1708         if (adev->dm.dmub_bo)
1709                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1710                                       &adev->dm.dmub_bo_gpu_addr,
1711                                       &adev->dm.dmub_bo_cpu_addr);
1712
1713         if (adev->dm.hpd_rx_offload_wq) {
1714                 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1715                         if (adev->dm.hpd_rx_offload_wq[i].wq) {
1716                                 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1717                                 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1718                         }
1719                 }
1720
1721                 kfree(adev->dm.hpd_rx_offload_wq);
1722                 adev->dm.hpd_rx_offload_wq = NULL;
1723         }
1724
1725         /* DC Destroy TODO: Replace destroy DAL */
1726         if (adev->dm.dc)
1727                 dc_destroy(&adev->dm.dc);
1728         /*
1729          * TODO: pageflip, vblank interrupt
1730          *
1731          * amdgpu_dm_irq_fini(adev);
1732          */
1733
1734         if (adev->dm.cgs_device) {
1735                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1736                 adev->dm.cgs_device = NULL;
1737         }
1738         if (adev->dm.freesync_module) {
1739                 mod_freesync_destroy(adev->dm.freesync_module);
1740                 adev->dm.freesync_module = NULL;
1741         }
1742
1743         mutex_destroy(&adev->dm.audio_lock);
1744         mutex_destroy(&adev->dm.dc_lock);
1747 }
1748
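/*
 * Select and load the optional DMCU firmware for the current ASIC. ASICs
 * without a DMCU (or with DMUB-based equivalents) simply return 0, and a
 * missing firmware file is not treated as an error.
 */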
1749 static int load_dmcu_fw(struct amdgpu_device *adev)
1750 {
1751         const char *fw_name_dmcu = NULL;
1752         int r;
1753         const struct dmcu_firmware_header_v1_0 *hdr;
1754
1755         switch (adev->asic_type) {
1756 #if defined(CONFIG_DRM_AMD_DC_SI)
1757         case CHIP_TAHITI:
1758         case CHIP_PITCAIRN:
1759         case CHIP_VERDE:
1760         case CHIP_OLAND:
1761 #endif
1762         case CHIP_BONAIRE:
1763         case CHIP_HAWAII:
1764         case CHIP_KAVERI:
1765         case CHIP_KABINI:
1766         case CHIP_MULLINS:
1767         case CHIP_TONGA:
1768         case CHIP_FIJI:
1769         case CHIP_CARRIZO:
1770         case CHIP_STONEY:
1771         case CHIP_POLARIS11:
1772         case CHIP_POLARIS10:
1773         case CHIP_POLARIS12:
1774         case CHIP_VEGAM:
1775         case CHIP_VEGA10:
1776         case CHIP_VEGA12:
1777         case CHIP_VEGA20:
1778                 return 0;
1779         case CHIP_NAVI12:
1780                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1781                 break;
1782         case CHIP_RAVEN:
1783                 if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
                    ASICREV_IS_RAVEN2(adev->external_rev_id))
1784                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1787                 else
1788                         return 0;
1789                 break;
1790         default:
1791                 switch (adev->ip_versions[DCE_HWIP][0]) {
1792                 case IP_VERSION(2, 0, 2):
1793                 case IP_VERSION(2, 0, 3):
1794                 case IP_VERSION(2, 0, 0):
1795                 case IP_VERSION(2, 1, 0):
1796                 case IP_VERSION(3, 0, 0):
1797                 case IP_VERSION(3, 0, 2):
1798                 case IP_VERSION(3, 0, 3):
1799                 case IP_VERSION(3, 0, 1):
1800                 case IP_VERSION(3, 1, 2):
1801                 case IP_VERSION(3, 1, 3):
1802                 case IP_VERSION(3, 1, 5):
1803                 case IP_VERSION(3, 1, 6):
1804                         return 0;
1805                 default:
1806                         break;
1807                 }
1808                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1809                 return -EINVAL;
1810         }
1811
1812         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1813                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1814                 return 0;
1815         }
1816
1817         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1818         if (r == -ENOENT) {
1819                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1820                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1821                 adev->dm.fw_dmcu = NULL;
1822                 return 0;
1823         }
1824         if (r) {
1825                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1826                         fw_name_dmcu);
1827                 return r;
1828         }
1829
1830         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1831         if (r) {
1832                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1833                         fw_name_dmcu);
1834                 release_firmware(adev->dm.fw_dmcu);
1835                 adev->dm.fw_dmcu = NULL;
1836                 return r;
1837         }
1838
1839         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1840         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1841         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1842         adev->firmware.fw_size +=
1843                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1844
1845         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1846         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1847         adev->firmware.fw_size +=
1848                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1849
1850         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1851
1852         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1853
1854         return 0;
1855 }
1856
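/* Register accessors handed to the DMUB service; both route through DC. */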
1857 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1858 {
1859         struct amdgpu_device *adev = ctx;
1860
1861         return dm_read_reg(adev->dm.dc->ctx, address);
1862 }
1863
1864 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1865                                      uint32_t value)
1866 {
1867         struct amdgpu_device *adev = ctx;
1868
1869         return dm_write_reg(adev->dm.dc->ctx, address, value);
1870 }
1871
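/*
 * Software-side DMUB setup: pick and validate the DMUB firmware for the
 * ASIC, create the DMUB service, compute the region layout, and back it
 * with a VRAM allocation (see the TODO below about moving this into GART).
 */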
1872 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1873 {
1874         struct dmub_srv_create_params create_params;
1875         struct dmub_srv_region_params region_params;
1876         struct dmub_srv_region_info region_info;
1877         struct dmub_srv_fb_params fb_params;
1878         struct dmub_srv_fb_info *fb_info;
1879         struct dmub_srv *dmub_srv;
1880         const struct dmcub_firmware_header_v1_0 *hdr;
1881         const char *fw_name_dmub;
1882         enum dmub_asic dmub_asic;
1883         enum dmub_status status;
1884         int r;
1885
1886         switch (adev->ip_versions[DCE_HWIP][0]) {
1887         case IP_VERSION(2, 1, 0):
1888                 dmub_asic = DMUB_ASIC_DCN21;
1889                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1890                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1891                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1892                 break;
1893         case IP_VERSION(3, 0, 0):
1894                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1895                         dmub_asic = DMUB_ASIC_DCN30;
1896                         fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1897                 } else {
1898                         dmub_asic = DMUB_ASIC_DCN30;
1899                         fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1900                 }
1901                 break;
1902         case IP_VERSION(3, 0, 1):
1903                 dmub_asic = DMUB_ASIC_DCN301;
1904                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1905                 break;
1906         case IP_VERSION(3, 0, 2):
1907                 dmub_asic = DMUB_ASIC_DCN302;
1908                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1909                 break;
1910         case IP_VERSION(3, 0, 3):
1911                 dmub_asic = DMUB_ASIC_DCN303;
1912                 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1913                 break;
1914         case IP_VERSION(3, 1, 2):
1915         case IP_VERSION(3, 1, 3):
1916                 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1917                 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1918                 break;
1919         case IP_VERSION(3, 1, 5):
1920                 dmub_asic = DMUB_ASIC_DCN315;
1921                 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
1922                 break;
1923         case IP_VERSION(3, 1, 6):
1924                 dmub_asic = DMUB_ASIC_DCN316;
1925                 fw_name_dmub = FIRMWARE_DCN316_DMUB;
1926                 break;
1927         default:
1928                 /* ASIC doesn't support DMUB. */
1929                 return 0;
1930         }
1931
1932         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1933         if (r) {
1934                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1935                 return 0;
1936         }
1937
1938         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1939         if (r) {
1940                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1941                 return 0;
1942         }
1943
1944         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1945         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1946
1947         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1948                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1949                         AMDGPU_UCODE_ID_DMCUB;
1950                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1951                         adev->dm.dmub_fw;
1952                 adev->firmware.fw_size +=
1953                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1954
1955                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1956                          adev->dm.dmcub_fw_version);
1957         }
1958
1960         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1961         dmub_srv = adev->dm.dmub_srv;
1962
1963         if (!dmub_srv) {
1964                 DRM_ERROR("Failed to allocate DMUB service!\n");
1965                 return -ENOMEM;
1966         }
1967
1968         memset(&create_params, 0, sizeof(create_params));
1969         create_params.user_ctx = adev;
1970         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1971         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1972         create_params.asic = dmub_asic;
1973
1974         /* Create the DMUB service. */
1975         status = dmub_srv_create(dmub_srv, &create_params);
1976         if (status != DMUB_STATUS_OK) {
1977                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1978                 return -EINVAL;
1979         }
1980
1981         /* Calculate the size of all the regions for the DMUB service. */
1982         memset(&region_params, 0, sizeof(region_params));
1983
1984         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1985                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1986         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1987         region_params.vbios_size = adev->bios_size;
1988         region_params.fw_bss_data = region_params.bss_data_size ?
1989                 adev->dm.dmub_fw->data +
1990                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1991                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1992         region_params.fw_inst_const =
1993                 adev->dm.dmub_fw->data +
1994                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1995                 PSP_HEADER_BYTES;
1996
1997         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1998                                            &region_info);
1999
2000         if (status != DMUB_STATUS_OK) {
2001                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2002                 return -EINVAL;
2003         }
2004
2005         /*
2006          * Allocate a framebuffer based on the total size of all the regions.
2007          * TODO: Move this into GART.
2008          */
2009         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2010                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
2011                                     &adev->dm.dmub_bo_gpu_addr,
2012                                     &adev->dm.dmub_bo_cpu_addr);
2013         if (r)
2014                 return r;
2015
2016         /* Rebase the regions on the framebuffer address. */
2017         memset(&fb_params, 0, sizeof(fb_params));
2018         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2019         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2020         fb_params.region_info = &region_info;
2021
2022         adev->dm.dmub_fb_info =
2023                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2024         fb_info = adev->dm.dmub_fb_info;
2025
2026         if (!fb_info) {
2027                 DRM_ERROR("Failed to allocate framebuffer info for DMUB service!\n");
2029                 return -ENOMEM;
2030         }
2031
2032         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2033         if (status != DMUB_STATUS_OK) {
2034                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2035                 return -EINVAL;
2036         }
2037
2038         return 0;
2039 }
2040
2041 static int dm_sw_init(void *handle)
2042 {
2043         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2044         int r;
2045
2046         r = dm_dmub_sw_init(adev);
2047         if (r)
2048                 return r;
2049
2050         return load_dmcu_fw(adev);
2051 }
2052
2053 static int dm_sw_fini(void *handle)
2054 {
2055         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2056
2057         kfree(adev->dm.dmub_fb_info);
2058         adev->dm.dmub_fb_info = NULL;
2059
2060         if (adev->dm.dmub_srv) {
2061                 dmub_srv_destroy(adev->dm.dmub_srv);
2062                 adev->dm.dmub_srv = NULL;
2063         }
2064
2065         release_firmware(adev->dm.dmub_fw);
2066         adev->dm.dmub_fw = NULL;
2067
2068         release_firmware(adev->dm.fw_dmcu);
2069         adev->dm.fw_dmcu = NULL;
2070
2071         return 0;
2072 }
2073
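/* Start topology management on every MST branch link that has an AUX channel. */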
2074 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2075 {
2076         struct amdgpu_dm_connector *aconnector;
2077         struct drm_connector *connector;
2078         struct drm_connector_list_iter iter;
2079         int ret = 0;
2080
2081         drm_connector_list_iter_begin(dev, &iter);
2082         drm_for_each_connector_iter(connector, &iter) {
2083                 aconnector = to_amdgpu_dm_connector(connector);
2084                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2085                     aconnector->mst_mgr.aux) {
2086                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2087                                          aconnector,
2088                                          aconnector->base.base.id);
2089
2090                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2091                         if (ret < 0) {
2092                                 DRM_ERROR("DM_MST: Failed to start MST\n");
2093                                 aconnector->dc_link->type =
2094                                         dc_connection_single;
2095                                 break;
2096                         }
2097                 }
2098         }
2099         drm_connector_list_iter_end(&iter);
2100
2101         return ret;
2102 }
2103
2104 static int dm_late_init(void *handle)
2105 {
2106         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2107
2108         struct dmcu_iram_parameters params;
2109         unsigned int linear_lut[16];
2110         int i;
2111         struct dmcu *dmcu = NULL;
2112
2113         dmcu = adev->dm.dc->res_pool->dmcu;
2114
2115         for (i = 0; i < 16; i++)
2116                 linear_lut[i] = 0xFFFF * i / 15;
2117
2118         params.set = 0;
2119         params.backlight_ramping_override = false;
2120         params.backlight_ramping_start = 0xCCCC;
2121         params.backlight_ramping_reduction = 0xCCCCCCCC;
2122         params.backlight_lut_array_size = 16;
2123         params.backlight_lut_array = linear_lut;
2124
2125         /* Min backlight level after ABM reduction; don't allow below 1%:
2126          * 0xFFFF * 0.01 = 0x28F
2127          */
2128         params.min_abm_backlight = 0x28F;
2129         /* In the case where ABM is implemented on dmcub,
2130          * the dmcu object will be NULL.
2131          * ABM 2.4 and up are implemented on dmcub.
2132          */
2133         if (dmcu) {
2134                 if (!dmcu_load_iram(dmcu, params))
2135                         return -EINVAL;
2136         } else if (adev->dm.dc->ctx->dmub_srv) {
2137                 struct dc_link *edp_links[MAX_NUM_EDP];
2138                 int edp_num;
2139
2140                 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2141                 for (i = 0; i < edp_num; i++) {
2142                         if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2143                                 return -EINVAL;
2144                 }
2145         }
2146
2147         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2148 }
2149
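/*
 * Suspend or resume the MST topology managers across S3. If a manager fails
 * to resume, tear its topology down and schedule a hotplug event so
 * userspace re-probes the tree.
 */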
2150 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2151 {
2152         struct amdgpu_dm_connector *aconnector;
2153         struct drm_connector *connector;
2154         struct drm_connector_list_iter iter;
2155         struct drm_dp_mst_topology_mgr *mgr;
2156         int ret;
2157         bool need_hotplug = false;
2158
2159         drm_connector_list_iter_begin(dev, &iter);
2160         drm_for_each_connector_iter(connector, &iter) {
2161                 aconnector = to_amdgpu_dm_connector(connector);
2162                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2163                     aconnector->mst_port)
2164                         continue;
2165
2166                 mgr = &aconnector->mst_mgr;
2167
2168                 if (suspend) {
2169                         drm_dp_mst_topology_mgr_suspend(mgr);
2170                 } else {
2171                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2172                         if (ret < 0) {
2173                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2174                                 need_hotplug = true;
2175                         }
2176                 }
2177         }
2178         drm_connector_list_iter_end(&iter);
2179
2180         if (need_hotplug)
2181                 drm_kms_helper_hotplug_event(dev);
2182 }
2183
2184 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2185 {
2186         int ret = 0;
2187
2188         /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
2189          * depends on the Windows driver dc implementation.
2190          * For Navi1x, clock settings of dcn watermarks are fixed. The settings
2191          * should be passed to smu during boot up and resume from s3.
2192          * boot up: dc calculates dcn watermark clock settings within dc_create,
2193          * dcn20_resource_construct
2194          * then calls pplib functions below to pass the settings to smu:
2195          * smu_set_watermarks_for_clock_ranges
2196          * smu_set_watermarks_table
2197          * navi10_set_watermarks_table
2198          * smu_write_watermarks_table
2199          *
2200          * For Renoir, clock settings of dcn watermarks are also fixed values.
2201          * dc has implemented a different flow for the Windows driver:
2202          * dc_hardware_init / dc_set_power_state
2203          * dcn10_init_hw
2204          * notify_wm_ranges
2205          * set_wm_ranges
2206          * -- Linux
2207          * smu_set_watermarks_for_clock_ranges
2208          * renoir_set_watermarks_table
2209          * smu_write_watermarks_table
2210          *
2211          * For Linux,
2212          * dc_hardware_init -> amdgpu_dm_init
2213          * dc_set_power_state --> dm_resume
2214          *
2215          * therefore, this function applies to navi10/12/14 but not Renoir
2217          */
2218         switch (adev->ip_versions[DCE_HWIP][0]) {
2219         case IP_VERSION(2, 0, 2):
2220         case IP_VERSION(2, 0, 0):
2221                 break;
2222         default:
2223                 return 0;
2224         }
2225
2226         ret = amdgpu_dpm_write_watermarks_table(adev);
2227         if (ret) {
2228                 DRM_ERROR("Failed to update WMTABLE!\n");
2229                 return ret;
2230         }
2231
2232         return 0;
2233 }
2234
2235 /**
2236  * dm_hw_init() - Initialize DC device
2237  * @handle: The base driver device containing the amdgpu_dm device.
2238  *
2239  * Initialize the &struct amdgpu_display_manager device. This involves calling
2240  * the initializers of each DM component, then populating the struct with them.
2241  *
2242  * Although the function implies hardware initialization, both hardware and
2243  * software are initialized here. Splitting them out to their relevant init
2244  * hooks is a future TODO item.
2245  *
2246  * Some notable things that are initialized here:
2247  *
2248  * - Display Core, both software and hardware
2249  * - DC modules that we need (freesync and color management)
2250  * - DRM software states
2251  * - Interrupt sources and handlers
2252  * - Vblank support
2253  * - Debug FS entries, if enabled
2254  */
2255 static int dm_hw_init(void *handle)
2256 {
2257         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

2258         /* Create DAL display manager */
2259         r = amdgpu_dm_init(adev);
        if (r)
                return r;
2260         amdgpu_dm_hpd_init(adev);
2261
2262         return 0;
2263 }
2264
2265 /**
2266  * dm_hw_fini() - Teardown DC device
2267  * @handle: The base driver device containing the amdgpu_dm device.
2268  *
2269  * Teardown components within &struct amdgpu_display_manager that require
2270  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2271  * were loaded. Also flush IRQ workqueues and disable them.
2272  */
2273 static int dm_hw_fini(void *handle)
2274 {
2275         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2276
2277         amdgpu_dm_hpd_fini(adev);
2278
2279         amdgpu_dm_irq_fini(adev);
2280         amdgpu_dm_fini(adev);
2281         return 0;
2282 }
2283
2285 static int dm_enable_vblank(struct drm_crtc *crtc);
2286 static void dm_disable_vblank(struct drm_crtc *crtc);
2287
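/*
 * Toggle pflip and vblank interrupts for every stream with active planes in
 * @state; used to quiesce and later restore IRQs around GPU reset.
 */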
2288 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2289                                  struct dc_state *state, bool enable)
2290 {
2291         enum dc_irq_source irq_source;
2292         struct amdgpu_crtc *acrtc;
2293         int rc = -EBUSY;
2294         int i = 0;
2295
2296         for (i = 0; i < state->stream_count; i++) {
2297                 acrtc = get_crtc_by_otg_inst(
2298                                 adev, state->stream_status[i].primary_otg_inst);
2299
2300                 if (acrtc && state->stream_status[i].plane_count != 0) {
2301                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2302                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2303                         DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2304                                       acrtc->crtc_id, enable ? "en" : "dis", rc);
2305                         if (rc)
2306                                 DRM_WARN("Failed to %s pflip interrupts\n",
2307                                          enable ? "enable" : "disable");
2308
2309                         if (enable) {
2310                                 rc = dm_enable_vblank(&acrtc->base);
2311                                 if (rc)
2312                                         DRM_WARN("Failed to enable vblank interrupts\n");
2313                         } else {
2314                                 dm_disable_vblank(&acrtc->base);
2315                         }
2317                 }
2318         }
2320 }
2321
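/*
 * Commit an empty configuration: copy the current state, strip all planes
 * and streams from it, and commit the result. Used on suspend during reset.
 */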
2322 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2323 {
2324         struct dc_state *context = NULL;
2325         enum dc_status res = DC_ERROR_UNEXPECTED;
2326         int i;
2327         struct dc_stream_state *del_streams[MAX_PIPES];
2328         int del_streams_count = 0;
2329
2330         memset(del_streams, 0, sizeof(del_streams));
2331
2332         context = dc_create_state(dc);
2333         if (context == NULL)
2334                 goto context_alloc_fail;
2335
2336         dc_resource_state_copy_construct_current(dc, context);
2337
2338         /* First remove from context all streams */
2339         for (i = 0; i < context->stream_count; i++) {
2340                 struct dc_stream_state *stream = context->streams[i];
2341
2342                 del_streams[del_streams_count++] = stream;
2343         }
2344
2345         /* Remove all planes for removed streams and then remove the streams */
2346         for (i = 0; i < del_streams_count; i++) {
2347                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2348                         res = DC_FAIL_DETACH_SURFACES;
2349                         goto fail;
2350                 }
2351
2352                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2353                 if (res != DC_OK)
2354                         goto fail;
2355         }
2356
2357         res = dc_commit_state(dc, context);
2358
2359 fail:
2360         dc_release_state(context);
2361
2362 context_alloc_fail:
2363         return res;
2364 }
2365
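/* Flush all pending HPD RX offload work before suspending. */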
2366 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2367 {
2368         int i;
2369
2370         if (dm->hpd_rx_offload_wq) {
2371                 for (i = 0; i < dm->dc->caps.max_links; i++)
2372                         flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2373         }
2374 }
2375
2376 static int dm_suspend(void *handle)
2377 {
2378         struct amdgpu_device *adev = handle;
2379         struct amdgpu_display_manager *dm = &adev->dm;
2380         int ret = 0;
2381
2382         if (amdgpu_in_reset(adev)) {
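                /*
                 * Note: dc_lock is taken here and intentionally left held
                 * across the reset; the matching unlock happens in dm_resume()
                 * once the cached state has been recommitted.
                 */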
2383                 mutex_lock(&dm->dc_lock);
2384
2385                 dc_allow_idle_optimizations(adev->dm.dc, false);
2386
2387                 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2388
2389                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2390
2391                 amdgpu_dm_commit_zero_streams(dm->dc);
2392
2393                 amdgpu_dm_irq_suspend(adev);
2394
2395                 hpd_rx_irq_work_suspend(dm);
2396
2397                 return ret;
2398         }
2399
2400         WARN_ON(adev->dm.cached_state);
2401         adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2402
2403         s3_handle_mst(adev_to_drm(adev), true);
2404
2405         amdgpu_dm_irq_suspend(adev);
2406
2407         hpd_rx_irq_work_suspend(dm);
2408
2409         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2410
2411         return 0;
2412 }
2413
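/**
 * amdgpu_dm_find_first_crtc_matching_connector() - Find the connector whose
 * new state in @state is bound to @crtc.
 * @state: The atomic state to search.
 * @crtc: The CRTC to match connectors against.
 *
 * Return: The first matching &struct amdgpu_dm_connector, or NULL if none of
 * the new connector states in @state use @crtc.
 */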
2414 struct amdgpu_dm_connector *
2415 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2416                                              struct drm_crtc *crtc)
2417 {
2418         uint32_t i;
2419         struct drm_connector_state *new_con_state;
2420         struct drm_connector *connector;
2421         struct drm_crtc *crtc_from_state;
2422
2423         for_each_new_connector_in_state(state, connector, new_con_state, i) {
2424                 crtc_from_state = new_con_state->crtc;
2425
2426                 if (crtc_from_state == crtc)
2427                         return to_amdgpu_dm_connector(connector);
2428         }
2429
2430         return NULL;
2431 }
2432
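/*
 * Fabricate a sink for a link when no physical sink was detected (e.g. for
 * forced connectors): derive the sink caps from the connector signal, create
 * the sink, and attempt to read a local EDID. Note that DP is reported as a
 * virtual signal in this path.
 */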
2433 static void emulated_link_detect(struct dc_link *link)
2434 {
2435         struct dc_sink_init_data sink_init_data = { 0 };
2436         struct display_sink_capability sink_caps = { 0 };
2437         enum dc_edid_status edid_status;
2438         struct dc_context *dc_ctx = link->ctx;
2439         struct dc_sink *sink = NULL;
2440         struct dc_sink *prev_sink = NULL;
2441
2442         link->type = dc_connection_none;
2443         prev_sink = link->local_sink;
2444
2445         if (prev_sink)
2446                 dc_sink_release(prev_sink);
2447
2448         switch (link->connector_signal) {
2449         case SIGNAL_TYPE_HDMI_TYPE_A: {
2450                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2451                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2452                 break;
2453         }
2454
2455         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2456                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2457                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2458                 break;
2459         }
2460
2461         case SIGNAL_TYPE_DVI_DUAL_LINK: {
2462                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2463                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2464                 break;
2465         }
2466
2467         case SIGNAL_TYPE_LVDS: {
2468                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2469                 sink_caps.signal = SIGNAL_TYPE_LVDS;
2470                 break;
2471         }
2472
2473         case SIGNAL_TYPE_EDP: {
2474                 sink_caps.transaction_type =
2475                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2476                 sink_caps.signal = SIGNAL_TYPE_EDP;
2477                 break;
2478         }
2479
2480         case SIGNAL_TYPE_DISPLAY_PORT: {
2481                 sink_caps.transaction_type =
2482                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2483                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2484                 break;
2485         }
2486
2487         default:
2488                 DC_ERROR("Invalid connector type! signal:%d\n",
2489                         link->connector_signal);
2490                 return;
2491         }
2492
2493         sink_init_data.link = link;
2494         sink_init_data.sink_signal = sink_caps.signal;
2495
2496         sink = dc_sink_create(&sink_init_data);
2497         if (!sink) {
2498                 DC_ERROR("Failed to create sink!\n");
2499                 return;
2500         }
2501
2502         /* dc_sink_create returns a new reference */
2503         link->local_sink = sink;
2504
2505         edid_status = dm_helpers_read_local_edid(
2506                         link->ctx,
2507                         link,
2508                         sink);
2509
2510         if (edid_status != EDID_OK)
2511                 DC_ERROR("Failed to read EDID");
2512
2513 }
2514
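/*
 * After a GPU reset, replay the cached streams with force_full_update set on
 * every plane so DC reprograms the full surface state.
 */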
2515 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2516                                      struct amdgpu_display_manager *dm)
2517 {
2518         struct {
2519                 struct dc_surface_update surface_updates[MAX_SURFACES];
2520                 struct dc_plane_info plane_infos[MAX_SURFACES];
2521                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2522                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2523                 struct dc_stream_update stream_update;
2524         } *bundle;
2525         int k, m;
2526
2527         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2528
2529         if (!bundle) {
2530                 dm_error("Failed to allocate update bundle\n");
2531                 goto cleanup;
2532         }
2533
2534         for (k = 0; k < dc_state->stream_count; k++) {
2535                 bundle->stream_update.stream = dc_state->streams[k];
2536
2537                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2538                         bundle->surface_updates[m].surface =
2539                                 dc_state->stream_status->plane_states[m];
2540                         bundle->surface_updates[m].surface->force_full_update =
2541                                 true;
2542                 }
2543                 dc_commit_updates_for_stream(
2544                         dm->dc, bundle->surface_updates,
2545                         dc_state->stream_status->plane_count,
2546                         dc_state->streams[k], &bundle->stream_update, dc_state);
2547         }
2548
2549 cleanup:
2550         kfree(bundle);
2553 }
2554
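/* Force the stream on @link into DPMS off via a minimal stream update. */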
2555 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2556 {
2557         struct dc_stream_state *stream_state;
2558         struct amdgpu_dm_connector *aconnector = link->priv;
2559         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2560         struct dc_stream_update stream_update;
2561         bool dpms_off = true;
2562
2563         memset(&stream_update, 0, sizeof(stream_update));
2564         stream_update.dpms_off = &dpms_off;
2565
2566         mutex_lock(&adev->dm.dc_lock);
2567         stream_state = dc_stream_find_from_link(link);
2568
2569         if (stream_state == NULL) {
2570                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2571                 mutex_unlock(&adev->dm.dc_lock);
2572                 return;
2573         }
2574
2575         stream_update.stream = stream_state;
2576         acrtc_state->force_dpms_off = true;
2577         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2578                                      stream_state, &stream_update,
2579                                      stream_state->ctx->dc->current_state);
2580         mutex_unlock(&adev->dm.dc_lock);
2581 }
2582
2583 static int dm_resume(void *handle)
2584 {
2585         struct amdgpu_device *adev = handle;
2586         struct drm_device *ddev = adev_to_drm(adev);
2587         struct amdgpu_display_manager *dm = &adev->dm;
2588         struct amdgpu_dm_connector *aconnector;
2589         struct drm_connector *connector;
2590         struct drm_connector_list_iter iter;
2591         struct drm_crtc *crtc;
2592         struct drm_crtc_state *new_crtc_state;
2593         struct dm_crtc_state *dm_new_crtc_state;
2594         struct drm_plane *plane;
2595         struct drm_plane_state *new_plane_state;
2596         struct dm_plane_state *dm_new_plane_state;
2597         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2598         enum dc_connection_type new_connection_type = dc_connection_none;
2599         struct dc_state *dc_state;
2600         int i, r, j;
2601
2602         if (amdgpu_in_reset(adev)) {
2603                 dc_state = dm->cached_dc_state;
2604
2605                 /*
2606                  * The dc->current_state is backed up into dm->cached_dc_state
2607                  * before we commit 0 streams.
2608                  *
2609                  * DC will clear link encoder assignments on the real state
2610                  * but the changes won't propagate over to the copy we made
2611                  * before the 0 streams commit.
2612                  *
2613                  * DC expects that link encoder assignments are *not* valid
2614                  * when committing a state, so as a workaround we can copy
2615                  * off of the current state.
2616                  *
2617                  * We lose the previous assignments, but we had already
2618                  * commit 0 streams anyway.
2619                  */
2620                 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2621
2622                 if (dc_enable_dmub_notifications(adev->dm.dc))
2623                         amdgpu_dm_outbox_init(adev);
2624
2625                 r = dm_dmub_hw_init(adev);
2626                 if (r)
2627                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2628
2629                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2630                 dc_resume(dm->dc);
2631
2632                 amdgpu_dm_irq_resume_early(adev);
2633
2634                 for (i = 0; i < dc_state->stream_count; i++) {
2635                         dc_state->streams[i]->mode_changed = true;
2636                         for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2637                                 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2638                                         = 0xffffffff;
2639                         }
2640                 }
2641
2642                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2643
2644                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2645
2646                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2647
2648                 dc_release_state(dm->cached_dc_state);
2649                 dm->cached_dc_state = NULL;
2650
2651                 amdgpu_dm_irq_resume_late(adev);
2652
2653                 mutex_unlock(&dm->dc_lock);
2654
2655                 return 0;
2656         }
2657         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2658         dc_release_state(dm_state->context);
2659         dm_state->context = dc_create_state(dm->dc);
2660         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2661         dc_resource_state_construct(dm->dc, dm_state->context);
2662
2663         /* Re-enable outbox interrupts for DPIA. */
2664         if (dc_enable_dmub_notifications(adev->dm.dc))
2665                 amdgpu_dm_outbox_init(adev);
2666
2667         /* Before powering on DC we need to re-initialize DMUB. */
2668         dm_dmub_hw_resume(adev);
2669
2670         /* power on hardware */
2671         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2672
2673         /* program HPD filter */
2674         dc_resume(dm->dc);
2675
2676         /*
2677          * early enable HPD Rx IRQ, should be done before set mode as short
2678          * pulse interrupts are used for MST
2679          */
2680         amdgpu_dm_irq_resume_early(adev);
2681
2682         /* On resume we need to rewrite the MSTM control bits to enable MST */
2683         s3_handle_mst(ddev, false);
2684
2685         /* Do detection */
2686         drm_connector_list_iter_begin(ddev, &iter);
2687         drm_for_each_connector_iter(connector, &iter) {
2688                 aconnector = to_amdgpu_dm_connector(connector);
2689
2690                 /*
2691                  * This is the case when traversing through already created
2692                  * MST connectors; these should be skipped.
2693                  */
2694                 if (aconnector->dc_link &&
2695                     aconnector->dc_link->type == dc_connection_mst_branch)
2696                         continue;
2697
2698                 mutex_lock(&aconnector->hpd_lock);
2699                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2700                         DRM_ERROR("KMS: Failed to detect connector\n");
2701
2702                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2703                         emulated_link_detect(aconnector->dc_link);
2704                 else
2705                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2706
2707                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2708                         aconnector->fake_enable = false;
2709
2710                 if (aconnector->dc_sink)
2711                         dc_sink_release(aconnector->dc_sink);
2712                 aconnector->dc_sink = NULL;
2713                 amdgpu_dm_update_connector_after_detect(aconnector);
2714                 mutex_unlock(&aconnector->hpd_lock);
2715         }
2716         drm_connector_list_iter_end(&iter);
2717
2718         /* Force mode set in atomic commit */
2719         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2720                 new_crtc_state->active_changed = true;
2721
2722         /*
2723          * atomic_check is expected to create the dc states. We need to release
2724          * them here, since they were duplicated as part of the suspend
2725          * procedure.
2726          */
2727         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2728                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2729                 if (dm_new_crtc_state->stream) {
2730                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2731                         dc_stream_release(dm_new_crtc_state->stream);
2732                         dm_new_crtc_state->stream = NULL;
2733                 }
2734         }
2735
2736         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2737                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2738                 if (dm_new_plane_state->dc_state) {
2739                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2740                         dc_plane_state_release(dm_new_plane_state->dc_state);
2741                         dm_new_plane_state->dc_state = NULL;
2742                 }
2743         }
2744
2745         drm_atomic_helper_resume(ddev, dm->cached_state);
2746
2747         dm->cached_state = NULL;
2748
2749         amdgpu_dm_irq_resume_late(adev);
2750
2751         amdgpu_dm_smu_write_watermarks_table(adev);
2752
2753         return 0;
2754 }
2755
2756 /**
2757  * DOC: DM Lifecycle
2758  *
2759  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2760  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2761  * the base driver's device list to be initialized and torn down accordingly.
2762  *
2763  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2764  */
2765
2766 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2767         .name = "dm",
2768         .early_init = dm_early_init,
2769         .late_init = dm_late_init,
2770         .sw_init = dm_sw_init,
2771         .sw_fini = dm_sw_fini,
2772         .early_fini = amdgpu_dm_early_fini,
2773         .hw_init = dm_hw_init,
2774         .hw_fini = dm_hw_fini,
2775         .suspend = dm_suspend,
2776         .resume = dm_resume,
2777         .is_idle = dm_is_idle,
2778         .wait_for_idle = dm_wait_for_idle,
2779         .check_soft_reset = dm_check_soft_reset,
2780         .soft_reset = dm_soft_reset,
2781         .set_clockgating_state = dm_set_clockgating_state,
2782         .set_powergating_state = dm_set_powergating_state,
2783 };
2784
2785 const struct amdgpu_ip_block_version dm_ip_block =
2786 {
2787         .type = AMD_IP_BLOCK_TYPE_DCE,
2788         .major = 1,
2789         .minor = 0,
2790         .rev = 0,
2791         .funcs = &amdgpu_dm_funcs,
2792 };
2793
2795 /**
2796  * DOC: atomic
2797  *
2798  * *WIP*
2799  */
2800
2801 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2802         .fb_create = amdgpu_display_user_framebuffer_create,
2803         .get_format_info = amd_get_format_info,
2804         .output_poll_changed = drm_fb_helper_output_poll_changed,
2805         .atomic_check = amdgpu_dm_atomic_check,
2806         .atomic_commit = drm_atomic_helper_commit,
2807 };
2808
2809 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2810         .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2811 };
2812
2813 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2814 {
2815         u32 max_avg, min_cll, max, min, q, r;
2816         struct amdgpu_dm_backlight_caps *caps;
2817         struct amdgpu_display_manager *dm;
2818         struct drm_connector *conn_base;
2819         struct amdgpu_device *adev;
2820         struct dc_link *link = NULL;
2821         static const u8 pre_computed_values[] = {
2822                 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2823                 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2824         int i;
2825
2826         if (!aconnector || !aconnector->dc_link)
2827                 return;
2828
2829         link = aconnector->dc_link;
2830         if (link->connector_signal != SIGNAL_TYPE_EDP)
2831                 return;
2832
2833         conn_base = &aconnector->base;
2834         adev = drm_to_adev(conn_base->dev);
2835         dm = &adev->dm;
2836         for (i = 0; i < dm->num_of_edps; i++) {
2837                 if (link == dm->backlight_link[i])
2838                         break;
2839         }
2840         if (i >= dm->num_of_edps)
2841                 return;
2842         caps = &dm->backlight_caps[i];
2843         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2844         caps->aux_support = false;
2845         max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2846         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2847
2848         if (caps->ext_caps->bits.oled == 1 /*||
2849             caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2850             caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2851                 caps->aux_support = true;
2852
2853         if (amdgpu_backlight == 0)
2854                 caps->aux_support = false;
2855         else if (amdgpu_backlight == 1)
2856                 caps->aux_support = true;
2857
2858         /* From the specification (CTA-861-G), the maximum luminance is
2859          * calculated as:
2860          *      Luminance = 50*2**(CV/32)
2861          * where CV is a one-byte value.
2862          * Evaluating this expression directly would need floating-point
2863          * precision; to avoid that complexity, we exploit the fact that CV
2864          * is divided by a constant. By Euclid's division algorithm, CV can
2865          * be written as CV = 32*q + r. Substituting this into the
2866          * luminance expression gives 50*(2**q)*(2**(r/32)), so we only
2867          * need to pre-compute the values of 2**(r/32) for r in 0..31.
2868          * The pre-computed table was generated with the following Ruby
2869          * line:
2870          *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2871          * The results can be verified against pre_computed_values.
2872          */
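             /*
              * Worked example with an arbitrary max_avg of 77:
              * q = 77 >> 5 = 2 and r = 77 % 32 = 13, so
              * max = (1 << 2) * pre_computed_values[13] = 4 * 66 = 264.
              */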
2873         q = max_avg >> 5;
2874         r = max_avg % 32;
2875         max = (1 << q) * pre_computed_values[r];
2876
2877         // min luminance: maxLum * (CV/255)^2 / 100
2878         q = DIV_ROUND_CLOSEST(min_cll, 255);
2879         min = max * DIV_ROUND_CLOSEST((q * q), 100);
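             /*
              * Note: with a one-byte min_cll, q above is either 0 or 1, and
              * DIV_ROUND_CLOSEST(q * q, 100) is then 0, so this integer
              * approximation reports a minimum of 0 in practice.
              */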
2880
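             /*
              * These aux limits are kept in nits; get_brightness_range() later
              * multiplies them by 1000 because the DC API expects millinits.
              */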
2881         caps->aux_max_input_signal = max;
2882         caps->aux_min_input_signal = min;
2883 }
2884
2885 void amdgpu_dm_update_connector_after_detect(
2886                 struct amdgpu_dm_connector *aconnector)
2887 {
2888         struct drm_connector *connector = &aconnector->base;
2889         struct drm_device *dev = connector->dev;
2890         struct dc_sink *sink;
2891
2892         /* MST handled by drm_mst framework */
2893         if (aconnector->mst_mgr.mst_state)
2894                 return;
2895
2896         sink = aconnector->dc_link->local_sink;
2897         if (sink)
2898                 dc_sink_retain(sink);
2899
2900         /*
2901          * An EDID-managed connector gets its first update only in the mode_valid
2902          * hook; after that the connector sink is set to a fake or physical sink,
2903          * depending on link status. Skip if this was already done during boot.
2904          */
2905         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2906                         && aconnector->dc_em_sink) {
2907
2908                 /*
2909                  * For headless S3 resume, use the emulated sink (dc_em_sink) to
2910                  * fake a stream, because connector->sink is set to NULL on resume.
2911                  */
2912                 mutex_lock(&dev->mode_config.mutex);
2913
2914                 if (sink) {
2915                         if (aconnector->dc_sink) {
2916                                 amdgpu_dm_update_freesync_caps(connector, NULL);
2917                                 /*
2918                                  * The retain and release below bump the sink's refcount;
2919                                  * the link no longer points to it after disconnect, so the
2920                                  * next crtc-to-connector reshuffle by the UMD would otherwise
2921                                  * trigger an unwanted dc_sink release.
2922                                  */
2923                                 dc_sink_release(aconnector->dc_sink);
2924                         }
2925                         aconnector->dc_sink = sink;
2926                         dc_sink_retain(aconnector->dc_sink);
2927                         amdgpu_dm_update_freesync_caps(connector,
2928                                         aconnector->edid);
2929                 } else {
2930                         amdgpu_dm_update_freesync_caps(connector, NULL);
2931                         if (!aconnector->dc_sink) {
2932                                 aconnector->dc_sink = aconnector->dc_em_sink;
2933                                 dc_sink_retain(aconnector->dc_sink);
2934                         }
2935                 }
2936
2937                 mutex_unlock(&dev->mode_config.mutex);
2938
2939                 if (sink)
2940                         dc_sink_release(sink);
2941                 return;
2942         }
2943
2944         /*
2945          * TODO: temporary guard until a proper fix is found;
2946          * if this sink is an MST sink, we should not do anything.
2947          */
2948         if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2949                 dc_sink_release(sink);
2950                 return;
2951         }
2952
2953         if (aconnector->dc_sink == sink) {
2954                 /*
2955                  * We got a DP short pulse (Link Loss, DP CTS, etc.).
2956                  * Do nothing.
2957                  */
2958                 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2959                                 aconnector->connector_id);
2960                 if (sink)
2961                         dc_sink_release(sink);
2962                 return;
2963         }
2964
2965         DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2966                 aconnector->connector_id, aconnector->dc_sink, sink);
2967
2968         mutex_lock(&dev->mode_config.mutex);
2969
2970         /*
2971          * 1. Update status of the drm connector
2972          * 2. Send an event and let userspace tell us what to do
2973          */
2974         if (sink) {
2975                 /*
2976                  * TODO: check if we still need the S3 mode update workaround.
2977                  * If yes, put it here.
2978                  */
2979                 if (aconnector->dc_sink) {
2980                         amdgpu_dm_update_freesync_caps(connector, NULL);
2981                         dc_sink_release(aconnector->dc_sink);
2982                 }
2983
2984                 aconnector->dc_sink = sink;
2985                 dc_sink_retain(aconnector->dc_sink);
2986                 if (sink->dc_edid.length == 0) {
2987                         aconnector->edid = NULL;
2988                         if (aconnector->dc_link->aux_mode) {
2989                                 drm_dp_cec_unset_edid(
2990                                         &aconnector->dm_dp_aux.aux);
2991                         }
2992                 } else {
2993                         aconnector->edid =
2994                                 (struct edid *)sink->dc_edid.raw_edid;
2995
2996                         if (aconnector->dc_link->aux_mode)
2997                                 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2998                                                     aconnector->edid);
2999                 }
3000
3001                 drm_connector_update_edid_property(connector, aconnector->edid);
3002                 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3003                 update_connector_ext_caps(aconnector);
3004         } else {
3005                 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3006                 amdgpu_dm_update_freesync_caps(connector, NULL);
3007                 drm_connector_update_edid_property(connector, NULL);
3008                 aconnector->num_modes = 0;
3009                 dc_sink_release(aconnector->dc_sink);
3010                 aconnector->dc_sink = NULL;
3011                 aconnector->edid = NULL;
3012 #ifdef CONFIG_DRM_AMD_DC_HDCP
3013                 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
3014                 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3015                         connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3016 #endif
3017         }
3018
3019         mutex_unlock(&dev->mode_config.mutex);
3020
3021         update_subconnector_property(aconnector);
3022
3023         if (sink)
3024                 dc_sink_release(sink);
3025 }
3026
3027 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3028 {
3029         struct drm_connector *connector = &aconnector->base;
3030         struct drm_device *dev = connector->dev;
3031         enum dc_connection_type new_connection_type = dc_connection_none;
3032         struct amdgpu_device *adev = drm_to_adev(dev);
3033         struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3034         struct dm_crtc_state *dm_crtc_state = NULL;
3035
3036         if (adev->dm.disable_hpd_irq)
3037                 return;
3038
3039         if (dm_con_state->base.state && dm_con_state->base.crtc)
3040                 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3041                                         dm_con_state->base.state,
3042                                         dm_con_state->base.crtc));
3043         /*
3044          * In case of failure, or for MST, there is no need to update the connector
3045          * status or notify the OS, since MST handles this in its own context.
3046          */
3047         mutex_lock(&aconnector->hpd_lock);
3048
3049 #ifdef CONFIG_DRM_AMD_DC_HDCP
3050         if (adev->dm.hdcp_workqueue) {
3051                 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3052                 dm_con_state->update_hdcp = true;
3053         }
3054 #endif
3055         if (aconnector->fake_enable)
3056                 aconnector->fake_enable = false;
3057
3058         if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3059                 DRM_ERROR("KMS: Failed to detect connector\n");
3060
3061         if (aconnector->base.force && new_connection_type == dc_connection_none) {
3062                 emulated_link_detect(aconnector->dc_link);
3063
3064                 drm_modeset_lock_all(dev);
3065                 dm_restore_drm_connector_state(dev, connector);
3066                 drm_modeset_unlock_all(dev);
3067
3068                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3069                         drm_kms_helper_connector_hotplug_event(connector);
3070
3071         } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3072                 if (new_connection_type == dc_connection_none &&
3073                     aconnector->dc_link->type == dc_connection_none &&
3074                     dm_crtc_state)
3075                         dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3076
3077                 amdgpu_dm_update_connector_after_detect(aconnector);
3078
3079                 drm_modeset_lock_all(dev);
3080                 dm_restore_drm_connector_state(dev, connector);
3081                 drm_modeset_unlock_all(dev);
3082
3083                 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3084                         drm_kms_helper_connector_hotplug_event(connector);
3085         }
3086         mutex_unlock(&aconnector->hpd_lock);
3088 }
3089
3090 static void handle_hpd_irq(void *param)
3091 {
3092         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3093
3094         handle_hpd_irq_helper(aconnector);
3096 }
3097
3098 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3099 {
3100         uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3101         uint8_t dret;
3102         bool new_irq_handled = false;
3103         int dpcd_addr;
3104         int dpcd_bytes_to_read;
3105
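             /*
              * Bound the ESI servicing loop below so a sink that keeps raising
              * IRQs cannot trap us here indefinitely.
              */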
3106         const int max_process_count = 30;
3107         int process_count = 0;
3108
3109         const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3110
3111         if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3112                 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3113                 /* DPCD 0x200 - 0x201 for downstream IRQ */
3114                 dpcd_addr = DP_SINK_COUNT;
3115         } else {
3116                 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3117                 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3118                 dpcd_addr = DP_SINK_COUNT_ESI;
3119         }
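             /*
              * E.g. for DPCD rev >= 1.2 the read below fetches the four ESI
              * bytes 0x2002-0x2005: sink count, both device service IRQ
              * vectors and the link service IRQ vector.
              */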
3120
3121         dret = drm_dp_dpcd_read(
3122                 &aconnector->dm_dp_aux.aux,
3123                 dpcd_addr,
3124                 esi,
3125                 dpcd_bytes_to_read);
3126
3127         while (dret == dpcd_bytes_to_read &&
3128                 process_count < max_process_count) {
3129                 uint8_t retry;
3130                 dret = 0;
3131
3132                 process_count++;
3133
3134                 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3135                 /* handle HPD short pulse irq */
3136                 if (aconnector->mst_mgr.mst_state)
3137                         drm_dp_mst_hpd_irq(
3138                                 &aconnector->mst_mgr,
3139                                 esi,
3140                                 &new_irq_handled);
3141
3142                 if (new_irq_handled) {
3143                         /* ACK at DPCD to notify downstream */
3144                         const int ack_dpcd_bytes_to_write =
3145                                 dpcd_bytes_to_read - 1;
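                             /*
                              * esi[0] (the sink count) is read-only, so the
                              * ACK below starts at dpcd_addr + 1 with esi[1].
                              */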
3146
3147                         for (retry = 0; retry < 3; retry++) {
3148                                 uint8_t wret;
3149
3150                                 wret = drm_dp_dpcd_write(
3151                                         &aconnector->dm_dp_aux.aux,
3152                                         dpcd_addr + 1,
3153                                         &esi[1],
3154                                         ack_dpcd_bytes_to_write);
3155                                 if (wret == ack_dpcd_bytes_to_write)
3156                                         break;
3157                         }
3158
3159                         /* check if there is new irq to be handled */
3160                         dret = drm_dp_dpcd_read(
3161                                 &aconnector->dm_dp_aux.aux,
3162                                 dpcd_addr,
3163                                 esi,
3164                                 dpcd_bytes_to_read);
3165
3166                         new_irq_handled = false;
3167                 } else {
3168                         break;
3169                 }
3170         }
3171
3172         if (process_count == max_process_count)
3173                 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3174 }
3175
3176 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3177                                                         union hpd_irq_data hpd_irq_data)
3178 {
3179         struct hpd_rx_irq_offload_work *offload_work =
3180                                 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3181
3182         if (!offload_work) {
3183                 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3184                 return;
3185         }
3186
3187         INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3188         offload_work->data = hpd_irq_data;
3189         offload_work->offload_wq = offload_wq;
3190
3191         queue_work(offload_wq->wq, &offload_work->work);
3192         DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3193 }
3194
3195 static void handle_hpd_rx_irq(void *param)
3196 {
3197         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3198         struct drm_connector *connector = &aconnector->base;
3199         struct drm_device *dev = connector->dev;
3200         struct dc_link *dc_link = aconnector->dc_link;
3201         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3202         bool result = false;
3203         enum dc_connection_type new_connection_type = dc_connection_none;
3204         struct amdgpu_device *adev = drm_to_adev(dev);
3205         union hpd_irq_data hpd_irq_data;
3206         bool link_loss = false;
3207         bool has_left_work = false;
3208         int idx = aconnector->base.index;
3209         struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3210
3211         memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3212
3213         if (adev->dm.disable_hpd_irq)
3214                 return;
3215
3216         /*
3217          * TODO: Temporarily hold this mutex so the hpd interrupt cannot
3218          * hit a gpio conflict; once an i2c helper is implemented, the
3219          * mutex should be retired.
3220          */
3221         mutex_lock(&aconnector->hpd_lock);
3222
3223         result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3224                                                 &link_loss, true, &has_left_work);
3225
3226         if (!has_left_work)
3227                 goto out;
3228
3229         if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3230                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3231                 goto out;
3232         }
3233
3234         if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3235                 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3236                         hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3237                         dm_handle_mst_sideband_msg(aconnector);
3238                         goto out;
3239                 }
3240
3241                 if (link_loss) {
3242                         bool skip = false;
3243
3244                         spin_lock(&offload_wq->offload_lock);
3245                         skip = offload_wq->is_handling_link_loss;
3246
3247                         if (!skip)
3248                                 offload_wq->is_handling_link_loss = true;
3249
3250                         spin_unlock(&offload_wq->offload_lock);
3251
3252                         if (!skip)
3253                                 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3254
3255                         goto out;
3256                 }
3257         }
3258
3259 out:
3260         if (result && !is_mst_root_connector) {
3261                 /* Downstream Port status changed. */
3262                 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3263                         DRM_ERROR("KMS: Failed to detect connector\n");
3264
3265                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3266                         emulated_link_detect(dc_link);
3267
3268                         if (aconnector->fake_enable)
3269                                 aconnector->fake_enable = false;
3270
3271                         amdgpu_dm_update_connector_after_detect(aconnector);
3272
3274                         drm_modeset_lock_all(dev);
3275                         dm_restore_drm_connector_state(dev, connector);
3276                         drm_modeset_unlock_all(dev);
3277
3278                         drm_kms_helper_connector_hotplug_event(connector);
3279                 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3280
3281                         if (aconnector->fake_enable)
3282                                 aconnector->fake_enable = false;
3283
3284                         amdgpu_dm_update_connector_after_detect(aconnector);
3285
3287                         drm_modeset_lock_all(dev);
3288                         dm_restore_drm_connector_state(dev, connector);
3289                         drm_modeset_unlock_all(dev);
3290
3291                         drm_kms_helper_connector_hotplug_event(connector);
3292                 }
3293         }
3294 #ifdef CONFIG_DRM_AMD_DC_HDCP
3295         if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3296                 if (adev->dm.hdcp_workqueue)
3297                         hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3298         }
3299 #endif
3300
3301         if (dc_link->type != dc_connection_mst_branch)
3302                 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3303
3304         mutex_unlock(&aconnector->hpd_lock);
3305 }
3306
3307 static void register_hpd_handlers(struct amdgpu_device *adev)
3308 {
3309         struct drm_device *dev = adev_to_drm(adev);
3310         struct drm_connector *connector;
3311         struct amdgpu_dm_connector *aconnector;
3312         const struct dc_link *dc_link;
3313         struct dc_interrupt_params int_params = {0};
3314
3315         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3316         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3317
3318         list_for_each_entry(connector,
3319                         &dev->mode_config.connector_list, head) {
3320
3321                 aconnector = to_amdgpu_dm_connector(connector);
3322                 dc_link = aconnector->dc_link;
3323
3324                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3325                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3326                         int_params.irq_source = dc_link->irq_source_hpd;
3327
3328                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3329                                         handle_hpd_irq,
3330                                         (void *) aconnector);
3331                 }
3332
3333                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3334
3335                         /* Also register for DP short pulse (hpd_rx). */
3336                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3337                         int_params.irq_source = dc_link->irq_source_hpd_rx;
3338
3339                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
3340                                         handle_hpd_rx_irq,
3341                                         (void *) aconnector);
3342
3343                         if (adev->dm.hpd_rx_offload_wq)
3344                                 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3345                                         aconnector;
3346                 }
3347         }
3348 }
3349
3350 #if defined(CONFIG_DRM_AMD_DC_SI)
3351 /* Register IRQ sources and initialize IRQ callbacks */
3352 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3353 {
3354         struct dc *dc = adev->dm.dc;
3355         struct common_irq_params *c_irq_params;
3356         struct dc_interrupt_params int_params = {0};
3357         int r;
3358         int i;
3359         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3360
3361         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3362         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3363
3364         /*
3365          * Actions of amdgpu_irq_add_id():
3366          * 1. Register a set() function with base driver.
3367          *    Base driver will call set() function to enable/disable an
3368          *    interrupt in DC hardware.
3369          * 2. Register amdgpu_dm_irq_handler().
3370          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3371          *    coming from DC hardware.
3372          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3373          *    for acknowledging and handling. */
3374
3375         /* Use VBLANK interrupt */
3376         for (i = 0; i < adev->mode_info.num_crtc; i++) {
3377                 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3378                 if (r) {
3379                         DRM_ERROR("Failed to add crtc irq id!\n");
3380                         return r;
3381                 }
3382
3383                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3384                 int_params.irq_source =
3385                         dc_interrupt_to_irq_source(dc, i + 1, 0);
3386
3387                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3388
3389                 c_irq_params->adev = adev;
3390                 c_irq_params->irq_src = int_params.irq_source;
3391
3392                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3393                                 dm_crtc_high_irq, c_irq_params);
3394         }
3395
3396         /* Use GRPH_PFLIP interrupt */
3397         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3398                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3399                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3400                 if (r) {
3401                         DRM_ERROR("Failed to add page flip irq id!\n");
3402                         return r;
3403                 }
3404
3405                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3406                 int_params.irq_source =
3407                         dc_interrupt_to_irq_source(dc, i, 0);
3408
3409                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3410
3411                 c_irq_params->adev = adev;
3412                 c_irq_params->irq_src = int_params.irq_source;
3413
3414                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3415                                 dm_pflip_high_irq, c_irq_params);
3417         }
3418
3419         /* HPD */
3420         r = amdgpu_irq_add_id(adev, client_id,
3421                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3422         if (r) {
3423                 DRM_ERROR("Failed to add hpd irq id!\n");
3424                 return r;
3425         }
3426
3427         register_hpd_handlers(adev);
3428
3429         return 0;
3430 }
3431 #endif
3432
3433 /* Register IRQ sources and initialize IRQ callbacks */
3434 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3435 {
3436         struct dc *dc = adev->dm.dc;
3437         struct common_irq_params *c_irq_params;
3438         struct dc_interrupt_params int_params = {0};
3439         int r;
3440         int i;
3441         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3442
3443         if (adev->family >= AMDGPU_FAMILY_AI)
3444                 client_id = SOC15_IH_CLIENTID_DCE;
3445
3446         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3447         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3448
3449         /*
3450          * Actions of amdgpu_irq_add_id():
3451          * 1. Register a set() function with base driver.
3452          *    Base driver will call set() function to enable/disable an
3453          *    interrupt in DC hardware.
3454          * 2. Register amdgpu_dm_irq_handler().
3455          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3456          *    coming from DC hardware.
3457          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3458          *    for acknowledging and handling. */
3459
3460         /* Use VBLANK interrupt */
3461         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3462                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3463                 if (r) {
3464                         DRM_ERROR("Failed to add crtc irq id!\n");
3465                         return r;
3466                 }
3467
3468                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3469                 int_params.irq_source =
3470                         dc_interrupt_to_irq_source(dc, i, 0);
3471
3472                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3473
3474                 c_irq_params->adev = adev;
3475                 c_irq_params->irq_src = int_params.irq_source;
3476
3477                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3478                                 dm_crtc_high_irq, c_irq_params);
3479         }
3480
3481         /* Use VUPDATE interrupt */
3482         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3483                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3484                 if (r) {
3485                         DRM_ERROR("Failed to add vupdate irq id!\n");
3486                         return r;
3487                 }
3488
3489                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3490                 int_params.irq_source =
3491                         dc_interrupt_to_irq_source(dc, i, 0);
3492
3493                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3494
3495                 c_irq_params->adev = adev;
3496                 c_irq_params->irq_src = int_params.irq_source;
3497
3498                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3499                                 dm_vupdate_high_irq, c_irq_params);
3500         }
3501
3502         /* Use GRPH_PFLIP interrupt */
3503         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3504                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3505                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3506                 if (r) {
3507                         DRM_ERROR("Failed to add page flip irq id!\n");
3508                         return r;
3509                 }
3510
3511                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3512                 int_params.irq_source =
3513                         dc_interrupt_to_irq_source(dc, i, 0);
3514
3515                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3516
3517                 c_irq_params->adev = adev;
3518                 c_irq_params->irq_src = int_params.irq_source;
3519
3520                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3521                                 dm_pflip_high_irq, c_irq_params);
3523         }
3524
3525         /* HPD */
3526         r = amdgpu_irq_add_id(adev, client_id,
3527                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3528         if (r) {
3529                 DRM_ERROR("Failed to add hpd irq id!\n");
3530                 return r;
3531         }
3532
3533         register_hpd_handlers(adev);
3534
3535         return 0;
3536 }
3537
3538 /* Register IRQ sources and initialize IRQ callbacks */
3539 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3540 {
3541         struct dc *dc = adev->dm.dc;
3542         struct common_irq_params *c_irq_params;
3543         struct dc_interrupt_params int_params = {0};
3544         int r;
3545         int i;
3546 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3547         static const unsigned int vrtl_int_srcid[] = {
3548                 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3549                 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3550                 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3551                 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3552                 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3553                 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3554         };
3555 #endif
3556
3557         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3558         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3559
3560         /*
3561          * Actions of amdgpu_irq_add_id():
3562          * 1. Register a set() function with base driver.
3563          *    Base driver will call set() function to enable/disable an
3564          *    interrupt in DC hardware.
3565          * 2. Register amdgpu_dm_irq_handler().
3566          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3567          *    coming from DC hardware.
3568          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3569          *    for acknowledging and handling.
3570          */
3571
3572         /* Use VSTARTUP interrupt */
3573         for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3574                         i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3575                         i++) {
3576                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3577
3578                 if (r) {
3579                         DRM_ERROR("Failed to add crtc irq id!\n");
3580                         return r;
3581                 }
3582
3583                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3584                 int_params.irq_source =
3585                         dc_interrupt_to_irq_source(dc, i, 0);
3586
3587                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3588
3589                 c_irq_params->adev = adev;
3590                 c_irq_params->irq_src = int_params.irq_source;
3591
3592                 amdgpu_dm_irq_register_interrupt(
3593                         adev, &int_params, dm_crtc_high_irq, c_irq_params);
3594         }
3595
3596         /* Use otg vertical line interrupt */
3597 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3598         for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3599                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3600                                 vrtl_int_srcid[i], &adev->vline0_irq);
3601
3602                 if (r) {
3603                         DRM_ERROR("Failed to add vline0 irq id!\n");
3604                         return r;
3605                 }
3606
3607                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608                 int_params.irq_source =
3609                         dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3610
3611                 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3612                         DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3613                         break;
3614                 }
3615
3616                 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3617                                         - DC_IRQ_SOURCE_DC1_VLINE0];
3618
3619                 c_irq_params->adev = adev;
3620                 c_irq_params->irq_src = int_params.irq_source;
3621
3622                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3623                                 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3624         }
3625 #endif
3626
3627         /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3628          * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3629          * to trigger at end of each vblank, regardless of state of the lock,
3630          * matching DCE behaviour.
3631          */
3632         for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3633              i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3634              i++) {
3635                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3636
3637                 if (r) {
3638                         DRM_ERROR("Failed to add vupdate irq id!\n");
3639                         return r;
3640                 }
3641
3642                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3643                 int_params.irq_source =
3644                         dc_interrupt_to_irq_source(dc, i, 0);
3645
3646                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3647
3648                 c_irq_params->adev = adev;
3649                 c_irq_params->irq_src = int_params.irq_source;
3650
3651                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3652                                 dm_vupdate_high_irq, c_irq_params);
3653         }
3654
3655         /* Use GRPH_PFLIP interrupt */
3656         for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3657                         i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3658                         i++) {
3659                 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3660                 if (r) {
3661                         DRM_ERROR("Failed to add page flip irq id!\n");
3662                         return r;
3663                 }
3664
3665                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3666                 int_params.irq_source =
3667                         dc_interrupt_to_irq_source(dc, i, 0);
3668
3669                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3670
3671                 c_irq_params->adev = adev;
3672                 c_irq_params->irq_src = int_params.irq_source;
3673
3674                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3675                                 dm_pflip_high_irq, c_irq_params);
3677         }
3678
3679         /* HPD */
3680         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3681                         &adev->hpd_irq);
3682         if (r) {
3683                 DRM_ERROR("Failed to add hpd irq id!\n");
3684                 return r;
3685         }
3686
3687         register_hpd_handlers(adev);
3688
3689         return 0;
3690 }

3691 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3692 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3693 {
3694         struct dc *dc = adev->dm.dc;
3695         struct common_irq_params *c_irq_params;
3696         struct dc_interrupt_params int_params = {0};
3697         int r, i;
3698
3699         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3700         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3701
3702         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3703                         &adev->dmub_outbox_irq);
3704         if (r) {
3705                 DRM_ERROR("Failed to add outbox irq id!\n");
3706                 return r;
3707         }
3708
3709         if (dc->ctx->dmub_srv) {
3710                 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3711                 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3712                 int_params.irq_source =
3713                         dc_interrupt_to_irq_source(dc, i, 0);
3714
3715                 c_irq_params = &adev->dm.dmub_outbox_params[0];
3716
3717                 c_irq_params->adev = adev;
3718                 c_irq_params->irq_src = int_params.irq_source;
3719
3720                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3721                                 dm_dmub_outbox1_low_irq, c_irq_params);
3722         }
3723
3724         return 0;
3725 }
3726
3727 /*
3728  * Acquires the lock for the atomic state object and returns
3729  * the new atomic state.
3730  *
3731  * This should only be called during atomic check.
3732  */
3733 int dm_atomic_get_state(struct drm_atomic_state *state,
3734                         struct dm_atomic_state **dm_state)
3735 {
3736         struct drm_device *dev = state->dev;
3737         struct amdgpu_device *adev = drm_to_adev(dev);
3738         struct amdgpu_display_manager *dm = &adev->dm;
3739         struct drm_private_state *priv_state;
3740
3741         if (*dm_state)
3742                 return 0;
3743
3744         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3745         if (IS_ERR(priv_state))
3746                 return PTR_ERR(priv_state);
3747
3748         *dm_state = to_dm_atomic_state(priv_state);
3749
3750         return 0;
3751 }
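
     /*
      * Illustrative usage during atomic check (a sketch, not a fixed
      * contract):
      *
      *	struct dm_atomic_state *dm_state = NULL;
      *	int ret = dm_atomic_get_state(state, &dm_state);
      *
      *	if (ret)
      *		return ret;
      *	... dm_state->context is now safe to inspect and modify ...
      */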
3752
3753 static struct dm_atomic_state *
3754 dm_atomic_get_new_state(struct drm_atomic_state *state)
3755 {
3756         struct drm_device *dev = state->dev;
3757         struct amdgpu_device *adev = drm_to_adev(dev);
3758         struct amdgpu_display_manager *dm = &adev->dm;
3759         struct drm_private_obj *obj;
3760         struct drm_private_state *new_obj_state;
3761         int i;
3762
3763         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3764                 if (obj->funcs == dm->atomic_obj.funcs)
3765                         return to_dm_atomic_state(new_obj_state);
3766         }
3767
3768         return NULL;
3769 }
3770
3771 static struct drm_private_state *
3772 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3773 {
3774         struct dm_atomic_state *old_state, *new_state;
3775
3776         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3777         if (!new_state)
3778                 return NULL;
3779
3780         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3781
3782         old_state = to_dm_atomic_state(obj->state);
3783
3784         if (old_state && old_state->context)
3785                 new_state->context = dc_copy_state(old_state->context);
3786
3787         if (!new_state->context) {
3788                 kfree(new_state);
3789                 return NULL;
3790         }
3791
3792         return &new_state->base;
3793 }
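
     /*
      * dc_copy_state() gives the duplicate its own DC context, so atomic
      * check can mutate the copy without touching the committed context.
      */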
3794
3795 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3796                                     struct drm_private_state *state)
3797 {
3798         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3799
3800         if (dm_state && dm_state->context)
3801                 dc_release_state(dm_state->context);
3802
3803         kfree(dm_state);
3804 }
3805
3806 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3807         .atomic_duplicate_state = dm_atomic_duplicate_state,
3808         .atomic_destroy_state = dm_atomic_destroy_state,
3809 };
3810
3811 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3812 {
3813         struct dm_atomic_state *state;
3814         int r;
3815
3816         adev->mode_info.mode_config_initialized = true;
3817
3818         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3819         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3820
3821         adev_to_drm(adev)->mode_config.max_width = 16384;
3822         adev_to_drm(adev)->mode_config.max_height = 16384;
3823
3824         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3825         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3826         /* indicates support for immediate flip */
3827         adev_to_drm(adev)->mode_config.async_page_flip = true;
3828
3829         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3830
3831         state = kzalloc(sizeof(*state), GFP_KERNEL);
3832         if (!state)
3833                 return -ENOMEM;
3834
3835         state->context = dc_create_state(adev->dm.dc);
3836         if (!state->context) {
3837                 kfree(state);
3838                 return -ENOMEM;
3839         }
3840
3841         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3842
3843         drm_atomic_private_obj_init(adev_to_drm(adev),
3844                                     &adev->dm.atomic_obj,
3845                                     &state->base,
3846                                     &dm_atomic_state_funcs);
3847
3848         r = amdgpu_display_modeset_create_props(adev);
3849         if (r) {
3850                 dc_release_state(state->context);
3851                 kfree(state);
3852                 return r;
3853         }
3854
3855         r = amdgpu_dm_audio_init(adev);
3856         if (r) {
3857                 dc_release_state(state->context);
3858                 kfree(state);
3859                 return r;
3860         }
3861
3862         return 0;
3863 }
3864
3865 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3866 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3867 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3868
3869 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3870         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3871
3872 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3873                                             int bl_idx)
3874 {
3875 #if defined(CONFIG_ACPI)
3876         struct amdgpu_dm_backlight_caps caps;
3877
3878         memset(&caps, 0, sizeof(caps));
3879
3880         if (dm->backlight_caps[bl_idx].caps_valid)
3881                 return;
3882
3883         amdgpu_acpi_get_backlight_caps(&caps);
3884         if (caps.caps_valid) {
3885                 dm->backlight_caps[bl_idx].caps_valid = true;
3886                 if (caps.aux_support)
3887                         return;
3888                 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3889                 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3890         } else {
3891                 dm->backlight_caps[bl_idx].min_input_signal =
3892                                 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3893                 dm->backlight_caps[bl_idx].max_input_signal =
3894                                 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3895         }
3896 #else
3897         if (dm->backlight_caps[bl_idx].aux_support)
3898                 return;
3899
3900         dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3901         dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3902 #endif
3903 }
3904
3905 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3906                                 unsigned *min, unsigned *max)
3907 {
3908         if (!caps)
3909                 return 0;
3910
3911         if (caps->aux_support) {
3912                 // Firmware limits are in nits, DC API wants millinits.
3913                 *max = 1000 * caps->aux_max_input_signal;
3914                 *min = 1000 * caps->aux_min_input_signal;
3915         } else {
3916                 // Firmware limits are 8-bit, PWM control is 16-bit.
3917                 *max = 0x101 * caps->max_input_signal;
3918                 *min = 0x101 * caps->min_input_signal;
3919         }
3920         return 1;
3921 }
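
     /*
      * The 0x101 scale factor maps the firmware's 8-bit limits onto the
      * 16-bit PWM range exactly: 255 * 0x101 = 0xffff.
      */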
3922
3923 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3924                                         uint32_t brightness)
3925 {
3926         unsigned min, max;
3927
3928         if (!get_brightness_range(caps, &min, &max))
3929                 return brightness;
3930
3931         // Rescale 0..255 to min..max
3932         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3933                                        AMDGPU_MAX_BL_LEVEL);
3934 }
3935
3936 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3937                                       uint32_t brightness)
3938 {
3939         unsigned min, max;
3940
3941         if (!get_brightness_range(caps, &min, &max))
3942                 return brightness;
3943
3944         if (brightness < min)
3945                 return 0;
3946         // Rescale min..max to 0..255
3947         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3948                                  max - min);
3949 }
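
     /*
      * Worked example with the default PWM caps (min_input_signal = 12,
      * max_input_signal = 255): the range becomes [3084, 65535], so user
      * brightness 0 maps to 3084 and 255 maps to 65535; converting those
      * values back yields 0 and 255 again.
      */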
3950
3951 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3952                                          int bl_idx,
3953                                          u32 user_brightness)
3954 {
3955         struct amdgpu_dm_backlight_caps caps;
3956         struct dc_link *link;
3957         u32 brightness;
3958         bool rc;
3959
3960         amdgpu_dm_update_backlight_caps(dm, bl_idx);
3961         caps = dm->backlight_caps[bl_idx];
3962
3963         dm->brightness[bl_idx] = user_brightness;
3964         /* update scratch register */
3965         if (bl_idx == 0)
3966                 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3967         brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3968         link = (struct dc_link *)dm->backlight_link[bl_idx];
3969
3970         /* Change brightness based on AUX property */
3971         if (caps.aux_support) {
3972                 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3973                                                       AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3974                 if (!rc)
3975                         DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3976         } else {
3977                 rc = dc_link_set_backlight_level(link, brightness, 0);
3978                 if (!rc)
3979                         DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3980         }
3981
3982         if (rc)
3983                 dm->actual_brightness[bl_idx] = user_brightness;
3984 }
3985
3986 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3987 {
3988         struct amdgpu_display_manager *dm = bl_get_data(bd);
3989         int i;
3990
3991         for (i = 0; i < dm->num_of_edps; i++) {
3992                 if (bd == dm->backlight_dev[i])
3993                         break;
3994         }
3995         if (i >= AMDGPU_DM_MAX_NUM_EDP)
3996                 i = 0;
3997         amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3998
3999         return 0;
4000 }
4001
4002 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4003                                          int bl_idx)
4004 {
4005         struct amdgpu_dm_backlight_caps caps;
4006         struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4007
4008         amdgpu_dm_update_backlight_caps(dm, bl_idx);
4009         caps = dm->backlight_caps[bl_idx];
4010
4011         if (caps.aux_support) {
4012                 u32 avg, peak;
4013                 bool rc;
4014
4015                 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4016                 if (!rc)
4017                         return dm->brightness[bl_idx];
4018                 return convert_brightness_to_user(&caps, avg);
4019         } else {
4020                 int ret = dc_link_get_backlight_level(link);
4021
4022                 if (ret == DC_ERROR_UNEXPECTED)
4023                         return dm->brightness[bl_idx];
4024                 return convert_brightness_to_user(&caps, ret);
4025         }
4026 }
4027
4028 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4029 {
4030         struct amdgpu_display_manager *dm = bl_get_data(bd);
4031         int i;
4032
4033         for (i = 0; i < dm->num_of_edps; i++) {
4034                 if (bd == dm->backlight_dev[i])
4035                         break;
4036         }
4037         if (i >= AMDGPU_DM_MAX_NUM_EDP)
4038                 i = 0;
4039         return amdgpu_dm_backlight_get_level(dm, i);
4040 }
4041
4042 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4043         .options = BL_CORE_SUSPENDRESUME,
4044         .get_brightness = amdgpu_dm_backlight_get_brightness,
4045         .update_status  = amdgpu_dm_backlight_update_status,
4046 };
4047
4048 static void
4049 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4050 {
4051         char bl_name[16];
4052         struct backlight_properties props = { 0 };
4053
4054         amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4055         dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4056
4057         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4058         props.brightness = AMDGPU_MAX_BL_LEVEL;
4059         props.type = BACKLIGHT_RAW;
4060
4061         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4062                  adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4063
4064         dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4065                                                                        adev_to_drm(dm->adev)->dev,
4066                                                                        dm,
4067                                                                        &amdgpu_dm_backlight_ops,
4068                                                                        &props);
4069
4070         if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4071                 DRM_ERROR("DM: Backlight registration failed!\n");
4072         else
4073                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4074 }
4075 #endif
4076
4077 static int initialize_plane(struct amdgpu_display_manager *dm,
4078                             struct amdgpu_mode_info *mode_info, int plane_id,
4079                             enum drm_plane_type plane_type,
4080                             const struct dc_plane_cap *plane_cap)
4081 {
4082         struct drm_plane *plane;
4083         unsigned long possible_crtcs;
4084         int ret = 0;
4085
4086         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4087         if (!plane) {
4088                 DRM_ERROR("KMS: Failed to allocate plane\n");
4089                 return -ENOMEM;
4090         }
4091         plane->type = plane_type;
4092
4093         /*
4094          * HACK: IGT tests expect that the primary plane for a CRTC
4095          * can only have one possible CRTC. Only expose support for
4096          * any CRTC if the plane is not going to be used as a primary
4097          * plane for a CRTC - i.e. for overlay or underlay planes.
4098          */
4099         possible_crtcs = 1 << plane_id;
4100         if (plane_id >= dm->dc->caps.max_streams)
4101                 possible_crtcs = 0xff;
4102
4103         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4104
4105         if (ret) {
4106                 DRM_ERROR("KMS: Failed to initialize plane\n");
4107                 kfree(plane);
4108                 return ret;
4109         }
4110
4111         if (mode_info)
4112                 mode_info->planes[plane_id] = plane;
4113
4114         return ret;
4115 }
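
/*
 * Illustrative note: possible_crtcs is the standard DRM bitmask of CRTC
 * indexes a plane may bind to. For example, plane_id 1 yields
 * possible_crtcs = 1 << 1 = 0x2 (CRTC 1 only), while overlay planes
 * (plane_id >= max_streams) get 0xff and may bind to any of CRTCs 0-7.
 */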
4116
4118 static void register_backlight_device(struct amdgpu_display_manager *dm,
4119                                       struct dc_link *link)
4120 {
4121 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4122         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4123
4124         if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4125             link->type != dc_connection_none) {
4126                 /*
4127                  * Even if registration fails, we should continue with
4128                  * DM initialization because not having a backlight control
4129                  * is better than a black screen.
4130                  */
4131                 if (!dm->backlight_dev[dm->num_of_edps])
4132                         amdgpu_dm_register_backlight_device(dm);
4133
4134                 if (dm->backlight_dev[dm->num_of_edps]) {
4135                         dm->backlight_link[dm->num_of_edps] = link;
4136                         dm->num_of_edps++;
4137                 }
4138         }
4139 #endif
4140 }
4141
4143 /*
4144  * In this architecture, the association
4145  * connector -> encoder -> crtc
4146  * is not really required. The crtc and connector will hold the
4147  * display_index as an abstraction to use with the DAL component.
4148  *
4149  * Returns 0 on success
4150  */
4151 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4152 {
4153         struct amdgpu_display_manager *dm = &adev->dm;
4154         int32_t i;
4155         struct amdgpu_dm_connector *aconnector = NULL;
4156         struct amdgpu_encoder *aencoder = NULL;
4157         struct amdgpu_mode_info *mode_info = &adev->mode_info;
4158         uint32_t link_cnt;
4159         int32_t primary_planes;
4160         enum dc_connection_type new_connection_type = dc_connection_none;
4161         const struct dc_plane_cap *plane;
4162         bool psr_feature_enabled = false;
4163
4164         dm->display_indexes_num = dm->dc->caps.max_streams;
4165         /* Update the actual number of CRTCs used */
4166         adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4167
4168         link_cnt = dm->dc->caps.max_links;
4169         if (amdgpu_dm_mode_config_init(dm->adev)) {
4170                 DRM_ERROR("DM: Failed to initialize mode config\n");
4171                 return -EINVAL;
4172         }
4173
4174         /* There is one primary plane per CRTC */
4175         primary_planes = dm->dc->caps.max_streams;
4176         ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4177
4178         /*
4179          * Initialize primary planes, the implicit planes used by legacy IOCTLs.
4180          * Order is reversed to match iteration order in atomic check.
4181          */
4182         for (i = (primary_planes - 1); i >= 0; i--) {
4183                 plane = &dm->dc->caps.planes[i];
4184
4185                 if (initialize_plane(dm, mode_info, i,
4186                                      DRM_PLANE_TYPE_PRIMARY, plane)) {
4187                         DRM_ERROR("KMS: Failed to initialize primary plane\n");
4188                         goto fail;
4189                 }
4190         }
4191
4192         /*
4193          * Initialize overlay planes, index starting after primary planes.
4194          * These planes have a higher DRM index than the primary planes since
4195          * they should be considered as having a higher z-order.
4196          * Order is reversed to match iteration order in atomic check.
4197          *
4198          * Only support DCN for now, and only expose one so we don't encourage
4199          * userspace to use up all the pipes.
4200          */
4201         for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4202                 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4203
4204                 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4205                         continue;
4206
4207                 if (!plane->blends_with_above || !plane->blends_with_below)
4208                         continue;
4209
4210                 if (!plane->pixel_format_support.argb8888)
4211                         continue;
4212
4213                 if (initialize_plane(dm, NULL, primary_planes + i,
4214                                      DRM_PLANE_TYPE_OVERLAY, plane)) {
4215                         DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4216                         goto fail;
4217                 }
4218
4219                 /* Only create one overlay plane. */
4220                 break;
4221         }
4222
4223         for (i = 0; i < dm->dc->caps.max_streams; i++)
4224                 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4225                         DRM_ERROR("KMS: Failed to initialize crtc\n");
4226                         goto fail;
4227                 }
4228
4229         /* Use Outbox interrupt */
4230         switch (adev->ip_versions[DCE_HWIP][0]) {
4231         case IP_VERSION(3, 0, 0):
4232         case IP_VERSION(3, 1, 2):
4233         case IP_VERSION(3, 1, 3):
4234         case IP_VERSION(3, 1, 5):
4235         case IP_VERSION(3, 1, 6):
4236         case IP_VERSION(2, 1, 0):
4237                 if (register_outbox_irq_handlers(dm->adev)) {
4238                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4239                         goto fail;
4240                 }
4241                 break;
4242         default:
4243                 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4244                               adev->ip_versions[DCE_HWIP][0]);
4245         }
4246
4247         /* Determine whether to enable PSR support by default. */
4248         if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4249                 switch (adev->ip_versions[DCE_HWIP][0]) {
4250                 case IP_VERSION(3, 1, 2):
4251                 case IP_VERSION(3, 1, 3):
4252                 case IP_VERSION(3, 1, 5):
4253                 case IP_VERSION(3, 1, 6):
4254                         psr_feature_enabled = true;
4255                         break;
4256                 default:
4257                         psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4258                         break;
4259                 }
4260         }
4261
4262         /* Loop over all connectors on the board */
4263         for (i = 0; i < link_cnt; i++) {
4264                 struct dc_link *link = NULL;
4265
4266                 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4267                         DRM_ERROR(
4268                                 "KMS: Cannot support more than %d display indexes\n",
4269                                         AMDGPU_DM_MAX_DISPLAY_INDEX);
4270                         continue;
4271                 }
4272
4273                 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4274                 if (!aconnector)
4275                         goto fail;
4276
4277                 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4278                 if (!aencoder)
4279                         goto fail;
4280
4281                 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4282                         DRM_ERROR("KMS: Failed to initialize encoder\n");
4283                         goto fail;
4284                 }
4285
4286                 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4287                         DRM_ERROR("KMS: Failed to initialize connector\n");
4288                         goto fail;
4289                 }
4290
4291                 link = dc_get_link_at_index(dm->dc, i);
4292
4293                 if (!dc_link_detect_sink(link, &new_connection_type))
4294                         DRM_ERROR("KMS: Failed to detect connector\n");
4295
4296                 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4297                         emulated_link_detect(link);
4298                         amdgpu_dm_update_connector_after_detect(aconnector);
4299
4300                 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4301                         amdgpu_dm_update_connector_after_detect(aconnector);
4302                         register_backlight_device(dm, link);
4303                         if (dm->num_of_edps)
4304                                 update_connector_ext_caps(aconnector);
4305                         if (psr_feature_enabled)
4306                                 amdgpu_dm_set_psr_caps(link);
4307
4308                         /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4309                          * PSR is also supported.
4310                          */
4311                         if (link->psr_settings.psr_feature_enabled)
4312                                 adev_to_drm(adev)->vblank_disable_immediate = false;
4313                 }
4314
4316         }
4317
4318         /* Software is initialized. Now we can register interrupt handlers. */
4319         switch (adev->asic_type) {
4320 #if defined(CONFIG_DRM_AMD_DC_SI)
4321         case CHIP_TAHITI:
4322         case CHIP_PITCAIRN:
4323         case CHIP_VERDE:
4324         case CHIP_OLAND:
4325                 if (dce60_register_irq_handlers(dm->adev)) {
4326                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4327                         goto fail;
4328                 }
4329                 break;
4330 #endif
4331         case CHIP_BONAIRE:
4332         case CHIP_HAWAII:
4333         case CHIP_KAVERI:
4334         case CHIP_KABINI:
4335         case CHIP_MULLINS:
4336         case CHIP_TONGA:
4337         case CHIP_FIJI:
4338         case CHIP_CARRIZO:
4339         case CHIP_STONEY:
4340         case CHIP_POLARIS11:
4341         case CHIP_POLARIS10:
4342         case CHIP_POLARIS12:
4343         case CHIP_VEGAM:
4344         case CHIP_VEGA10:
4345         case CHIP_VEGA12:
4346         case CHIP_VEGA20:
4347                 if (dce110_register_irq_handlers(dm->adev)) {
4348                         DRM_ERROR("DM: Failed to initialize IRQ\n");
4349                         goto fail;
4350                 }
4351                 break;
4352         default:
4353                 switch (adev->ip_versions[DCE_HWIP][0]) {
4354                 case IP_VERSION(1, 0, 0):
4355                 case IP_VERSION(1, 0, 1):
4356                 case IP_VERSION(2, 0, 2):
4357                 case IP_VERSION(2, 0, 3):
4358                 case IP_VERSION(2, 0, 0):
4359                 case IP_VERSION(2, 1, 0):
4360                 case IP_VERSION(3, 0, 0):
4361                 case IP_VERSION(3, 0, 2):
4362                 case IP_VERSION(3, 0, 3):
4363                 case IP_VERSION(3, 0, 1):
4364                 case IP_VERSION(3, 1, 2):
4365                 case IP_VERSION(3, 1, 3):
4366                 case IP_VERSION(3, 1, 5):
4367                 case IP_VERSION(3, 1, 6):
4368                         if (dcn10_register_irq_handlers(dm->adev)) {
4369                                 DRM_ERROR("DM: Failed to initialize IRQ\n");
4370                                 goto fail;
4371                         }
4372                         break;
4373                 default:
4374                         DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4375                                         adev->ip_versions[DCE_HWIP][0]);
4376                         goto fail;
4377                 }
4378                 break;
4379         }
4380
4381         return 0;
4382 fail:
4383         kfree(aencoder);
4384         kfree(aconnector);
4385
4386         return -EINVAL;
4387 }
4388
4389 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4390 {
4391         drm_atomic_private_obj_fini(&dm->atomic_obj);
4393 }
4394
4395 /******************************************************************************
4396  * amdgpu_display_funcs functions
4397  *****************************************************************************/
4398
4399 /*
4400  * dm_bandwidth_update - program display watermarks
4401  *
4402  * @adev: amdgpu_device pointer
4403  *
4404  * Calculate and program the display watermarks and line buffer allocation.
4405  */
4406 static void dm_bandwidth_update(struct amdgpu_device *adev)
4407 {
4408         /* TODO: implement later */
4409 }
4410
4411 static const struct amdgpu_display_funcs dm_display_funcs = {
4412         .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4413         .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4414         .backlight_set_level = NULL, /* never called for DC */
4415         .backlight_get_level = NULL, /* never called for DC */
4416         .hpd_sense = NULL,/* called unconditionally */
4417         .hpd_set_polarity = NULL, /* called unconditionally */
4418         .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4419         .page_flip_get_scanoutpos =
4420                 dm_crtc_get_scanoutpos,/* called unconditionally */
4421         .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4422         .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4423 };
4424
4425 #if defined(CONFIG_DEBUG_KERNEL_DC)
4426
4427 static ssize_t s3_debug_store(struct device *device,
4428                               struct device_attribute *attr,
4429                               const char *buf,
4430                               size_t count)
4431 {
4432         int ret;
4433         int s3_state;
4434         struct drm_device *drm_dev = dev_get_drvdata(device);
4435         struct amdgpu_device *adev = drm_to_adev(drm_dev);
4436
4437         ret = kstrtoint(buf, 0, &s3_state);
4438
4439         if (ret == 0) {
4440                 if (s3_state) {
4441                         dm_resume(adev);
4442                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
4443                 } else {
4444                         dm_suspend(adev);
                }
4445         }
4446
4447         return ret == 0 ? count : 0;
4448 }
4449
4450 DEVICE_ATTR_WO(s3_debug);
4451
4452 #endif
4453
4454 static int dm_early_init(void *handle)
4455 {
4456         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4457
4458         switch (adev->asic_type) {
4459 #if defined(CONFIG_DRM_AMD_DC_SI)
4460         case CHIP_TAHITI:
4461         case CHIP_PITCAIRN:
4462         case CHIP_VERDE:
4463                 adev->mode_info.num_crtc = 6;
4464                 adev->mode_info.num_hpd = 6;
4465                 adev->mode_info.num_dig = 6;
4466                 break;
4467         case CHIP_OLAND:
4468                 adev->mode_info.num_crtc = 2;
4469                 adev->mode_info.num_hpd = 2;
4470                 adev->mode_info.num_dig = 2;
4471                 break;
4472 #endif
4473         case CHIP_BONAIRE:
4474         case CHIP_HAWAII:
4475                 adev->mode_info.num_crtc = 6;
4476                 adev->mode_info.num_hpd = 6;
4477                 adev->mode_info.num_dig = 6;
4478                 break;
4479         case CHIP_KAVERI:
4480                 adev->mode_info.num_crtc = 4;
4481                 adev->mode_info.num_hpd = 6;
4482                 adev->mode_info.num_dig = 7;
4483                 break;
4484         case CHIP_KABINI:
4485         case CHIP_MULLINS:
4486                 adev->mode_info.num_crtc = 2;
4487                 adev->mode_info.num_hpd = 6;
4488                 adev->mode_info.num_dig = 6;
4489                 break;
4490         case CHIP_FIJI:
4491         case CHIP_TONGA:
4492                 adev->mode_info.num_crtc = 6;
4493                 adev->mode_info.num_hpd = 6;
4494                 adev->mode_info.num_dig = 7;
4495                 break;
4496         case CHIP_CARRIZO:
4497                 adev->mode_info.num_crtc = 3;
4498                 adev->mode_info.num_hpd = 6;
4499                 adev->mode_info.num_dig = 9;
4500                 break;
4501         case CHIP_STONEY:
4502                 adev->mode_info.num_crtc = 2;
4503                 adev->mode_info.num_hpd = 6;
4504                 adev->mode_info.num_dig = 9;
4505                 break;
4506         case CHIP_POLARIS11:
4507         case CHIP_POLARIS12:
4508                 adev->mode_info.num_crtc = 5;
4509                 adev->mode_info.num_hpd = 5;
4510                 adev->mode_info.num_dig = 5;
4511                 break;
4512         case CHIP_POLARIS10:
4513         case CHIP_VEGAM:
4514                 adev->mode_info.num_crtc = 6;
4515                 adev->mode_info.num_hpd = 6;
4516                 adev->mode_info.num_dig = 6;
4517                 break;
4518         case CHIP_VEGA10:
4519         case CHIP_VEGA12:
4520         case CHIP_VEGA20:
4521                 adev->mode_info.num_crtc = 6;
4522                 adev->mode_info.num_hpd = 6;
4523                 adev->mode_info.num_dig = 6;
4524                 break;
4525         default:
4526
4527                 switch (adev->ip_versions[DCE_HWIP][0]) {
4528                 case IP_VERSION(2, 0, 2):
4529                 case IP_VERSION(3, 0, 0):
4530                         adev->mode_info.num_crtc = 6;
4531                         adev->mode_info.num_hpd = 6;
4532                         adev->mode_info.num_dig = 6;
4533                         break;
4534                 case IP_VERSION(2, 0, 0):
4535                 case IP_VERSION(3, 0, 2):
4536                         adev->mode_info.num_crtc = 5;
4537                         adev->mode_info.num_hpd = 5;
4538                         adev->mode_info.num_dig = 5;
4539                         break;
4540                 case IP_VERSION(2, 0, 3):
4541                 case IP_VERSION(3, 0, 3):
4542                         adev->mode_info.num_crtc = 2;
4543                         adev->mode_info.num_hpd = 2;
4544                         adev->mode_info.num_dig = 2;
4545                         break;
4546                 case IP_VERSION(1, 0, 0):
4547                 case IP_VERSION(1, 0, 1):
4548                 case IP_VERSION(3, 0, 1):
4549                 case IP_VERSION(2, 1, 0):
4550                 case IP_VERSION(3, 1, 2):
4551                 case IP_VERSION(3, 1, 3):
4552                 case IP_VERSION(3, 1, 5):
4553                 case IP_VERSION(3, 1, 6):
4554                         adev->mode_info.num_crtc = 4;
4555                         adev->mode_info.num_hpd = 4;
4556                         adev->mode_info.num_dig = 4;
4557                         break;
4558                 default:
4559                         DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4560                                         adev->ip_versions[DCE_HWIP][0]);
4561                         return -EINVAL;
4562                 }
4563                 break;
4564         }
4565
4566         amdgpu_dm_set_irq_funcs(adev);
4567
4568         if (adev->mode_info.funcs == NULL)
4569                 adev->mode_info.funcs = &dm_display_funcs;
4570
4571         /*
4572          * Note: Do NOT change adev->audio_endpt_rreg and
4573          * adev->audio_endpt_wreg because they are initialised in
4574          * amdgpu_device_init()
4575          */
4576 #if defined(CONFIG_DEBUG_KERNEL_DC)
4577         device_create_file(
4578                 adev_to_drm(adev)->dev,
4579                 &dev_attr_s3_debug);
4580 #endif
4581
4582         return 0;
4583 }
4584
4585 static bool modeset_required(struct drm_crtc_state *crtc_state,
4586                              struct dc_stream_state *new_stream,
4587                              struct dc_stream_state *old_stream)
4588 {
4589         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4590 }
4591
4592 static bool modereset_required(struct drm_crtc_state *crtc_state)
4593 {
4594         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4595 }
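
/*
 * Illustrative note: a CRTC that turns on (or stays on) across a commit
 * that needs a modeset hits modeset_required(); one that turns off hits
 * modereset_required(). E.g. a DPMS-off commit has active == false with
 * drm_atomic_crtc_needs_modeset() true, so only modereset_required()
 * returns true for it.
 */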
4596
4597 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4598 {
4599         drm_encoder_cleanup(encoder);
4600         kfree(encoder);
4601 }
4602
4603 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4604         .destroy = amdgpu_dm_encoder_destroy,
4605 };
4606
4608 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4609                                          struct drm_framebuffer *fb,
4610                                          int *min_downscale, int *max_upscale)
4611 {
4612         struct amdgpu_device *adev = drm_to_adev(dev);
4613         struct dc *dc = adev->dm.dc;
4614         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4615         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4616
4617         switch (fb->format->format) {
4618         case DRM_FORMAT_P010:
4619         case DRM_FORMAT_NV12:
4620         case DRM_FORMAT_NV21:
4621                 *max_upscale = plane_cap->max_upscale_factor.nv12;
4622                 *min_downscale = plane_cap->max_downscale_factor.nv12;
4623                 break;
4624
4625         case DRM_FORMAT_XRGB16161616F:
4626         case DRM_FORMAT_ARGB16161616F:
4627         case DRM_FORMAT_XBGR16161616F:
4628         case DRM_FORMAT_ABGR16161616F:
4629                 *max_upscale = plane_cap->max_upscale_factor.fp16;
4630                 *min_downscale = plane_cap->max_downscale_factor.fp16;
4631                 break;
4632
4633         default:
4634                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4635                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4636                 break;
4637         }
4638
4639         /*
4640          * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
4641          * a scaling factor of 1.0 == 1000 units.
4642          */
4643         if (*max_upscale == 1)
4644                 *max_upscale = 1000;
4645
4646         if (*min_downscale == 1)
4647                 *min_downscale = 1000;
4648 }
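
/*
 * Illustrative note: scale factors are expressed in units of 1/1000.
 * A min_downscale of 250 therefore allows shrinking to 1/4 of the
 * source size, and a max_upscale of 16000 allows magnifying by 16x;
 * the special cap value 1 is normalized to 1000 (no scaling) above.
 */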
4649
4651 static int fill_dc_scaling_info(struct amdgpu_device *adev,
4652                                 const struct drm_plane_state *state,
4653                                 struct dc_scaling_info *scaling_info)
4654 {
4655         int scale_w, scale_h, min_downscale, max_upscale;
4656
4657         memset(scaling_info, 0, sizeof(*scaling_info));
4658
4659         /* Source is fixed point 16.16, but we ignore the fractional part for now... */
4660         scaling_info->src_rect.x = state->src_x >> 16;
4661         scaling_info->src_rect.y = state->src_y >> 16;
4662
4663         /*
4664          * For reasons we don't (yet) fully understand a non-zero
4665          * src_y coordinate into an NV12 buffer can cause a
4666          * system hang on DCN1x.
4667          * To avoid hangs (and maybe be overly cautious)
4668          * let's reject both non-zero src_x and src_y.
4669          *
4670          * We currently know of only one use-case to reproduce a
4671          * scenario with non-zero src_x and src_y for NV12, which
4672          * is to gesture the YouTube Android app into full screen
4673          * on ChromeOS.
4674          */
4675         if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4676             (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4677             (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4678             (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4679                 return -EINVAL;
4680
4681         scaling_info->src_rect.width = state->src_w >> 16;
4682         if (scaling_info->src_rect.width == 0)
4683                 return -EINVAL;
4684
4685         scaling_info->src_rect.height = state->src_h >> 16;
4686         if (scaling_info->src_rect.height == 0)
4687                 return -EINVAL;
4688
4689         scaling_info->dst_rect.x = state->crtc_x;
4690         scaling_info->dst_rect.y = state->crtc_y;
4691
4692         if (state->crtc_w == 0)
4693                 return -EINVAL;
4694
4695         scaling_info->dst_rect.width = state->crtc_w;
4696
4697         if (state->crtc_h == 0)
4698                 return -EINVAL;
4699
4700         scaling_info->dst_rect.height = state->crtc_h;
4701
4702         /* DRM doesn't specify clipping on destination output. */
4703         scaling_info->clip_rect = scaling_info->dst_rect;
4704
4705         /* Validate scaling per-format with DC plane caps */
4706         if (state->plane && state->plane->dev && state->fb) {
4707                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4708                                              &min_downscale, &max_upscale);
4709         } else {
4710                 min_downscale = 250;
4711                 max_upscale = 16000;
4712         }
4713
4714         scale_w = scaling_info->dst_rect.width * 1000 /
4715                   scaling_info->src_rect.width;
4716
4717         if (scale_w < min_downscale || scale_w > max_upscale)
4718                 return -EINVAL;
4719
4720         scale_h = scaling_info->dst_rect.height * 1000 /
4721                   scaling_info->src_rect.height;
4722
4723         if (scale_h < min_downscale || scale_h > max_upscale)
4724                 return -EINVAL;
4725
4726         /*
4727          * The "scaling_quality" can be ignored for now, quality = 0 has DC
4728          * assume reasonable defaults based on the format.
4729          */
4730
4731         return 0;
4732 }
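
/*
 * Worked example (illustrative): for a 1920x1080 source shown in a
 * 960x540 destination, src_w = 1920 << 16 in 16.16 fixed point, so
 * src_rect.width = 1920 and scale_w = 960 * 1000 / 1920 = 500, i.e. a
 * 2x downscale, which passes as long as min_downscale <= 500.
 */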
4733
4734 static void
4735 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4736                                  uint64_t tiling_flags)
4737 {
4738         /* Fill GFX8 params */
4739         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4740                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4741
4742                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4743                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4744                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4745                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4746                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4747
4748                 /* XXX fix me for VI */
4749                 tiling_info->gfx8.num_banks = num_banks;
4750                 tiling_info->gfx8.array_mode =
4751                                 DC_ARRAY_2D_TILED_THIN1;
4752                 tiling_info->gfx8.tile_split = tile_split;
4753                 tiling_info->gfx8.bank_width = bankw;
4754                 tiling_info->gfx8.bank_height = bankh;
4755                 tiling_info->gfx8.tile_aspect = mtaspect;
4756                 tiling_info->gfx8.tile_mode =
4757                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4758         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4759                         == DC_ARRAY_1D_TILED_THIN1) {
4760                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4761         }
4762
4763         tiling_info->gfx8.pipe_config =
4764                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4765 }
4766
4767 static void
4768 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4769                                   union dc_tiling_info *tiling_info)
4770 {
4771         tiling_info->gfx9.num_pipes =
4772                 adev->gfx.config.gb_addr_config_fields.num_pipes;
4773         tiling_info->gfx9.num_banks =
4774                 adev->gfx.config.gb_addr_config_fields.num_banks;
4775         tiling_info->gfx9.pipe_interleave =
4776                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4777         tiling_info->gfx9.num_shader_engines =
4778                 adev->gfx.config.gb_addr_config_fields.num_se;
4779         tiling_info->gfx9.max_compressed_frags =
4780                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4781         tiling_info->gfx9.num_rb_per_se =
4782                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4783         tiling_info->gfx9.shaderEnable = 1;
4784         if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4785                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4786 }
4787
4788 static int
4789 validate_dcc(struct amdgpu_device *adev,
4790              const enum surface_pixel_format format,
4791              const enum dc_rotation_angle rotation,
4792              const union dc_tiling_info *tiling_info,
4793              const struct dc_plane_dcc_param *dcc,
4794              const struct dc_plane_address *address,
4795              const struct plane_size *plane_size)
4796 {
4797         struct dc *dc = adev->dm.dc;
4798         struct dc_dcc_surface_param input;
4799         struct dc_surface_dcc_cap output;
4800
4801         memset(&input, 0, sizeof(input));
4802         memset(&output, 0, sizeof(output));
4803
4804         if (!dcc->enable)
4805                 return 0;
4806
4807         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4808             !dc->cap_funcs.get_dcc_compression_cap)
4809                 return -EINVAL;
4810
4811         input.format = format;
4812         input.surface_size.width = plane_size->surface_size.width;
4813         input.surface_size.height = plane_size->surface_size.height;
4814         input.swizzle_mode = tiling_info->gfx9.swizzle;
4815
4816         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4817                 input.scan = SCAN_DIRECTION_HORIZONTAL;
4818         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4819                 input.scan = SCAN_DIRECTION_VERTICAL;
4820
4821         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4822                 return -EINVAL;
4823
4824         if (!output.capable)
4825                 return -EINVAL;
4826
4827         if (dcc->independent_64b_blks == 0 &&
4828             output.grph.rgb.independent_64b_blks != 0)
4829                 return -EINVAL;
4830
4831         return 0;
4832 }
4833
4834 static bool
4835 modifier_has_dcc(uint64_t modifier)
4836 {
4837         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4838 }
4839
4840 static unsigned
4841 modifier_gfx9_swizzle_mode(uint64_t modifier)
4842 {
4843         if (modifier == DRM_FORMAT_MOD_LINEAR)
4844                 return 0;
4845
4846         return AMD_FMT_MOD_GET(TILE, modifier);
4847 }
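
/*
 * Illustrative note: for AMD format modifiers the TILE field holds the
 * GFX9+ swizzle mode directly; its low two bits encode the micro-tile
 * class (see enum dm_micro_swizzle below), which is why callers mask
 * the returned value with 3 to classify a modifier as Z/S/D/R.
 */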
4848
4849 static const struct drm_format_info *
4850 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4851 {
4852         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4853 }
4854
4855 static void
4856 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4857                                     union dc_tiling_info *tiling_info,
4858                                     uint64_t modifier)
4859 {
4860         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4861         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4862         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4863         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4864
4865         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4866
4867         if (!IS_AMD_FMT_MOD(modifier))
4868                 return;
4869
4870         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4871         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4872
4873         if (adev->family >= AMDGPU_FAMILY_NV) {
4874                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4875         } else {
4876                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4877
4878                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4879         }
4880 }
4881
4882 enum dm_micro_swizzle {
4883         MICRO_SWIZZLE_Z = 0,
4884         MICRO_SWIZZLE_S = 1,
4885         MICRO_SWIZZLE_D = 2,
4886         MICRO_SWIZZLE_R = 3
4887 };
4888
4889 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4890                                           uint32_t format,
4891                                           uint64_t modifier)
4892 {
4893         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4894         const struct drm_format_info *info = drm_format_info(format);
4895         int i;
4896
4897         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4898
4899         if (!info)
4900                 return false;
4901
4902         /*
4903          * We always have to allow these modifiers:
4904          * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4905          * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4906          */
4907         if (modifier == DRM_FORMAT_MOD_LINEAR ||
4908             modifier == DRM_FORMAT_MOD_INVALID) {
4909                 return true;
4910         }
4911
4912         /* Check that the modifier is on the list of the plane's supported modifiers. */
4913         for (i = 0; i < plane->modifier_count; i++) {
4914                 if (modifier == plane->modifiers[i])
4915                         break;
4916         }
4917         if (i == plane->modifier_count)
4918                 return false;
4919
4920         /*
4921          * For D swizzle the canonical modifier depends on the bpp, so check
4922          * it here.
4923          */
4924         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4925             adev->family >= AMDGPU_FAMILY_NV) {
4926                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4927                         return false;
4928         }
4929
4930         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4931             info->cpp[0] < 8)
4932                 return false;
4933
4934         if (modifier_has_dcc(modifier)) {
4935                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4936                 if (info->cpp[0] != 4)
4937                         return false;
4938                 /*
4939                  * We support multi-planar formats, but not when combined
                 * with additional DCC metadata planes.
                 */
4940                 if (info->num_planes > 1)
4941                         return false;
4942         }
4943
4944         return true;
4945 }
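
/*
 * Illustrative note: as checked above, a DCC modifier (one with
 * AMD_FMT_MOD_SET(DCC, 1)) is only accepted for single-plane 32bpp
 * formats (cpp == 4), e.g. DRM_FORMAT_XRGB8888, while LINEAR and
 * INVALID are always accepted so legacy userspace keeps working.
 */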
4946
4947 static void
4948 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4949 {
4950         if (!*mods)
4951                 return;
4952
4953         if (*cap - *size < 1) {
4954                 uint64_t new_cap = *cap * 2;
4955                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4956
4957                 if (!new_mods) {
4958                         kfree(*mods);
4959                         *mods = NULL;
4960                         return;
4961                 }
4962
4963                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4964                 kfree(*mods);
4965                 *mods = new_mods;
4966                 *cap = new_cap;
4967         }
4968
4969         (*mods)[*size] = mod;
4970         *size += 1;
4971 }
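
/*
 * Design note: the list grows geometrically (capacity doubles when
 * full) and any allocation failure frees the list and leaves *mods
 * NULL, so callers only need a single NULL check after appending all
 * modifiers instead of checking every add_modifier() call.
 */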
4972
4973 static void
4974 add_gfx9_modifiers(const struct amdgpu_device *adev,
4975                    uint64_t **mods, uint64_t *size, uint64_t *capacity)
4976 {
4977         int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4978         int pipe_xor_bits = min(8, pipes +
4979                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4980         int bank_xor_bits = min(8 - pipe_xor_bits,
4981                                 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4982         int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4983                  ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4984
4986         if (adev->family == AMDGPU_FAMILY_RV) {
4987                 /* Raven2 and later */
4988                 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4989
4990                 /*
4991                  * No _D DCC swizzles yet because we only allow 32bpp, which
4992                  * doesn't support _D on DCN
4993                  */
4994
4995                 if (has_constant_encode) {
4996                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
4997                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4998                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4999                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5000                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5001                                     AMD_FMT_MOD_SET(DCC, 1) |
5002                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5003                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5004                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
5005                 }
5006
5007                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5008                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5009                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5010                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5011                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5012                             AMD_FMT_MOD_SET(DCC, 1) |
5013                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5014                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5015                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
5016
5017                 if (has_constant_encode) {
5018                         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5019                                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5020                                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5021                                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5022                                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5023                                     AMD_FMT_MOD_SET(DCC, 1) |
5024                                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5025                                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5026                                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5028                                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5029                                     AMD_FMT_MOD_SET(RB, rb) |
5030                                     AMD_FMT_MOD_SET(PIPE, pipes));
5031                 }
5032
5033                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5034                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5035                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5036                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5037                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5038                             AMD_FMT_MOD_SET(DCC, 1) |
5039                             AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5040                             AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5041                             AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5042                             AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5043                             AMD_FMT_MOD_SET(RB, rb) |
5044                             AMD_FMT_MOD_SET(PIPE, pipes));
5045         }
5046
5047         /*
5048          * Only supported for 64bpp on Raven, will be filtered on format in
5049          * dm_plane_format_mod_supported.
5050          */
5051         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5052                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5053                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5054                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5055                     AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5056
5057         if (adev->family == AMDGPU_FAMILY_RV) {
5058                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5059                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5060                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5061                             AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5062                             AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5063         }
5064
5065         /*
5066          * Only supported for 64bpp on Raven, will be filtered on format in
5067          * dm_plane_format_mod_supported.
5068          */
5069         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5071                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5072
5073         if (adev->family == AMDGPU_FAMILY_RV) {
5074                 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075                             AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5076                             AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5077         }
5078 }
5079
5080 static void
5081 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5082                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5083 {
5084         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5085
5086         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5087                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5088                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5089                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5090                     AMD_FMT_MOD_SET(DCC, 1) |
5091                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5092                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5093                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5094
5095         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5096                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5097                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5098                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5099                     AMD_FMT_MOD_SET(DCC, 1) |
5100                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5101                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5102                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5103                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5104
5105         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5106                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5107                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5108                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5109
5110         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5111                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5112                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5113                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5114
5116         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5117         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5118                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5119                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5120
5121         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5122                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5123                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5124 }
5125
5126 static void
5127 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5128                       uint64_t **mods, uint64_t *size, uint64_t *capacity)
5129 {
5130         int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5131         int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5132
5133         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5134                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5135                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5136                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5137                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5138                     AMD_FMT_MOD_SET(DCC, 1) |
5139                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5140                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5141                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5142                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5143
5144         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5145                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5146                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5147                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5148                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5149                     AMD_FMT_MOD_SET(DCC, 1) |
5150                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5151                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5152                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5153
5154         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5156                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5157                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5158                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5159                     AMD_FMT_MOD_SET(DCC, 1) |
5160                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5161                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5162                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5163                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5164                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5165
5166         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5167                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5168                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5169                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5170                     AMD_FMT_MOD_SET(PACKERS, pkrs) |
5171                     AMD_FMT_MOD_SET(DCC, 1) |
5172                     AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5173                     AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5174                     AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5175                     AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5176
5177         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5178                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5179                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5180                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5181                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5182
5183         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5184                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5185                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5186                     AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5187                     AMD_FMT_MOD_SET(PACKERS, pkrs));
5188
5189         /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5190         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5191                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5192                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5193
5194         add_modifier(mods, size, capacity, AMD_FMT_MOD |
5195                     AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5196                     AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5197 }
5198
5199 static int
5200 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5201 {
5202         uint64_t size = 0, capacity = 128;
5203         *mods = NULL;
5204
5205         /* We have not hooked up any pre-GFX9 modifiers. */
5206         if (adev->family < AMDGPU_FAMILY_AI)
5207                 return 0;
5208
5209         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5210
5211         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5212                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5213                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5214                 return *mods ? 0 : -ENOMEM;
5215         }
5216
5217         switch (adev->family) {
5218         case AMDGPU_FAMILY_AI:
5219         case AMDGPU_FAMILY_RV:
5220                 add_gfx9_modifiers(adev, mods, &size, &capacity);
5221                 break;
5222         case AMDGPU_FAMILY_NV:
5223         case AMDGPU_FAMILY_VGH:
5224         case AMDGPU_FAMILY_YC:
5225         case AMDGPU_FAMILY_GC_10_3_6:
5226         case AMDGPU_FAMILY_GC_10_3_7:
5227                 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5228                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5229                 else
5230                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5231                 break;
5232         }
5233
5234         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5235
5236         /* INVALID marks the end of the list. */
5237         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5238
5239         if (!*mods)
5240                 return -ENOMEM;
5241
5242         return 0;
5243 }
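
/*
 * Usage note: the returned array is suitable for passing as the
 * format_modifiers argument of drm_universal_plane_init(), which
 * expects the list to be terminated by DRM_FORMAT_MOD_INVALID as done
 * above; callers are responsible for kfree()ing it afterwards.
 */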
5244
5245 static int
5246 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5247                                           const struct amdgpu_framebuffer *afb,
5248                                           const enum surface_pixel_format format,
5249                                           const enum dc_rotation_angle rotation,
5250                                           const struct plane_size *plane_size,
5251                                           union dc_tiling_info *tiling_info,
5252                                           struct dc_plane_dcc_param *dcc,
5253                                           struct dc_plane_address *address,
5254                                           const bool force_disable_dcc)
5255 {
5256         const uint64_t modifier = afb->base.modifier;
5257         int ret = 0;
5258
5259         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5260         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5261
5262         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5263                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5264                 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5265                 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5266
5267                 dcc->enable = 1;
5268                 dcc->meta_pitch = afb->base.pitches[1];
5269                 dcc->independent_64b_blks = independent_64b_blks;
5270                 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5271                         if (independent_64b_blks && independent_128b_blks)
5272                                 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5273                         else if (independent_128b_blks)
5274                                 dcc->dcc_ind_blk = hubp_ind_block_128b;
5275                         else if (independent_64b_blks && !independent_128b_blks)
5276                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5277                         else
5278                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5279                 } else {
5280                         if (independent_64b_blks)
5281                                 dcc->dcc_ind_blk = hubp_ind_block_64b;
5282                         else
5283                                 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5284                 }
5285
5286                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5287                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5288         }
5289
5290         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5291         if (ret)
5292                 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5293
5294         return ret;
5295 }
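
/*
 * Illustrative note: for a DCC-enabled framebuffer the metadata lives
 * in plane 1 of the GEM layout, so offsets[1]/pitches[1] above describe
 * the DCC surface while offsets[0]/pitches[0] describe the pixel data;
 * force_disable_dcc simply leaves dcc->enable at 0 and skips all this.
 */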
5296
5297 static int
5298 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5299                              const struct amdgpu_framebuffer *afb,
5300                              const enum surface_pixel_format format,
5301                              const enum dc_rotation_angle rotation,
5302                              const uint64_t tiling_flags,
5303                              union dc_tiling_info *tiling_info,
5304                              struct plane_size *plane_size,
5305                              struct dc_plane_dcc_param *dcc,
5306                              struct dc_plane_address *address,
5307                              bool tmz_surface,
5308                              bool force_disable_dcc)
5309 {
5310         const struct drm_framebuffer *fb = &afb->base;
5311         int ret;
5312
5313         memset(tiling_info, 0, sizeof(*tiling_info));
5314         memset(plane_size, 0, sizeof(*plane_size));
5315         memset(dcc, 0, sizeof(*dcc));
5316         memset(address, 0, sizeof(*address));
5317
5318         address->tmz_surface = tmz_surface;
5319
5320         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5321                 uint64_t addr = afb->address + fb->offsets[0];
5322
5323                 plane_size->surface_size.x = 0;
5324                 plane_size->surface_size.y = 0;
5325                 plane_size->surface_size.width = fb->width;
5326                 plane_size->surface_size.height = fb->height;
5327                 plane_size->surface_pitch =
5328                         fb->pitches[0] / fb->format->cpp[0];
5329
5330                 address->type = PLN_ADDR_TYPE_GRAPHICS;
5331                 address->grph.addr.low_part = lower_32_bits(addr);
5332                 address->grph.addr.high_part = upper_32_bits(addr);
5333         } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5334                 uint64_t luma_addr = afb->address + fb->offsets[0];
5335                 uint64_t chroma_addr = afb->address + fb->offsets[1];
5336
5337                 plane_size->surface_size.x = 0;
5338                 plane_size->surface_size.y = 0;
5339                 plane_size->surface_size.width = fb->width;
5340                 plane_size->surface_size.height = fb->height;
5341                 plane_size->surface_pitch =
5342                         fb->pitches[0] / fb->format->cpp[0];
5343
5344                 plane_size->chroma_size.x = 0;
5345                 plane_size->chroma_size.y = 0;
5346                 /* TODO: set these based on surface format */
5347                 plane_size->chroma_size.width = fb->width / 2;
5348                 plane_size->chroma_size.height = fb->height / 2;
5349
5350                 plane_size->chroma_pitch =
5351                         fb->pitches[1] / fb->format->cpp[1];
5352
5353                 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5354                 address->video_progressive.luma_addr.low_part =
5355                         lower_32_bits(luma_addr);
5356                 address->video_progressive.luma_addr.high_part =
5357                         upper_32_bits(luma_addr);
5358                 address->video_progressive.chroma_addr.low_part =
5359                         lower_32_bits(chroma_addr);
5360                 address->video_progressive.chroma_addr.high_part =
5361                         upper_32_bits(chroma_addr);
5362         }
5363
5364         if (adev->family >= AMDGPU_FAMILY_AI) {
5365                 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5366                                                                 rotation, plane_size,
5367                                                                 tiling_info, dcc,
5368                                                                 address,
5369                                                                 force_disable_dcc);
5370                 if (ret)
5371                         return ret;
5372         } else {
5373                 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5374         }
5375
5376         return 0;
5377 }
5378
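/*
 * Derive DC blending settings from the DRM plane state. Per-pixel alpha
 * is honoured only for overlay planes whose format carries alpha; a plane
 * alpha below 0xffff is reduced to an 8-bit global alpha (alpha >> 8).
 */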
5379 static void
5380 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5381                                bool *per_pixel_alpha, bool *pre_multiplied_alpha,
5382                                bool *global_alpha, int *global_alpha_value)
5383 {
5384         *per_pixel_alpha = false;
5385         *pre_multiplied_alpha = true;
5386         *global_alpha = false;
5387         *global_alpha_value = 0xff;
5388
5389         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5390                 return;
5391
5392         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
5393                 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
5394                 static const uint32_t alpha_formats[] = {
5395                         DRM_FORMAT_ARGB8888,
5396                         DRM_FORMAT_RGBA8888,
5397                         DRM_FORMAT_ABGR8888,
5398                 };
5399                 uint32_t format = plane_state->fb->format->format;
5400                 unsigned int i;
5401
5402                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5403                         if (format == alpha_formats[i]) {
5404                                 *per_pixel_alpha = true;
5405                                 break;
5406                         }
5407                 }
5408
5409                 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
5410                         *pre_multiplied_alpha = false;
5411         }
5412
5413         if (plane_state->alpha < 0xffff) {
5414                 *global_alpha = true;
5415                 *global_alpha_value = plane_state->alpha >> 8;
5416         }
5417 }
5418
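/*
 * Map the DRM color encoding/range properties onto a DC color space.
 * RGB surfaces are always treated as sRGB; for YCbCr, BT.601 and BT.709
 * support both ranges while BT.2020 is accepted in full range only.
 */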
5419 static int
5420 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5421                             const enum surface_pixel_format format,
5422                             enum dc_color_space *color_space)
5423 {
5424         bool full_range;
5425
5426         *color_space = COLOR_SPACE_SRGB;
5427
5428         /* DRM color properties only affect non-RGB formats. */
5429         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5430                 return 0;
5431
5432         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5433
5434         switch (plane_state->color_encoding) {
5435         case DRM_COLOR_YCBCR_BT601:
5436                 if (full_range)
5437                         *color_space = COLOR_SPACE_YCBCR601;
5438                 else
5439                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5440                 break;
5441
5442         case DRM_COLOR_YCBCR_BT709:
5443                 if (full_range)
5444                         *color_space = COLOR_SPACE_YCBCR709;
5445                 else
5446                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5447                 break;
5448
5449         case DRM_COLOR_YCBCR_BT2020:
5450                 if (full_range)
5451                         *color_space = COLOR_SPACE_2020_YCBCR;
5452                 else
5453                         return -EINVAL;
5454                 break;
5455
5456         default:
5457                 return -EINVAL;
5458         }
5459
5460         return 0;
5461 }
5462
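/*
 * Build a complete dc_plane_info from DRM plane state: pixel format and
 * rotation mapping, color space, buffer/tiling attributes and blending.
 * The surface address is returned separately in *address.
 */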
5463 static int
5464 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5465                             const struct drm_plane_state *plane_state,
5466                             const uint64_t tiling_flags,
5467                             struct dc_plane_info *plane_info,
5468                             struct dc_plane_address *address,
5469                             bool tmz_surface,
5470                             bool force_disable_dcc)
5471 {
5472         const struct drm_framebuffer *fb = plane_state->fb;
5473         const struct amdgpu_framebuffer *afb =
5474                 to_amdgpu_framebuffer(plane_state->fb);
5475         int ret;
5476
5477         memset(plane_info, 0, sizeof(*plane_info));
5478
5479         switch (fb->format->format) {
5480         case DRM_FORMAT_C8:
5481                 plane_info->format =
5482                         SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5483                 break;
5484         case DRM_FORMAT_RGB565:
5485                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5486                 break;
5487         case DRM_FORMAT_XRGB8888:
5488         case DRM_FORMAT_ARGB8888:
5489                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5490                 break;
5491         case DRM_FORMAT_XRGB2101010:
5492         case DRM_FORMAT_ARGB2101010:
5493                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5494                 break;
5495         case DRM_FORMAT_XBGR2101010:
5496         case DRM_FORMAT_ABGR2101010:
5497                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5498                 break;
5499         case DRM_FORMAT_XBGR8888:
5500         case DRM_FORMAT_ABGR8888:
5501                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5502                 break;
5503         case DRM_FORMAT_NV21:
5504                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5505                 break;
5506         case DRM_FORMAT_NV12:
5507                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5508                 break;
5509         case DRM_FORMAT_P010:
5510                 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5511                 break;
5512         case DRM_FORMAT_XRGB16161616F:
5513         case DRM_FORMAT_ARGB16161616F:
5514                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5515                 break;
5516         case DRM_FORMAT_XBGR16161616F:
5517         case DRM_FORMAT_ABGR16161616F:
5518                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5519                 break;
5520         case DRM_FORMAT_XRGB16161616:
5521         case DRM_FORMAT_ARGB16161616:
5522                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5523                 break;
5524         case DRM_FORMAT_XBGR16161616:
5525         case DRM_FORMAT_ABGR16161616:
5526                 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5527                 break;
5528         default:
5529                 DRM_ERROR(
5530                         "Unsupported screen format %p4cc\n",
5531                         &fb->format->format);
5532                 return -EINVAL;
5533         }
5534
5535         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5536         case DRM_MODE_ROTATE_0:
5537                 plane_info->rotation = ROTATION_ANGLE_0;
5538                 break;
5539         case DRM_MODE_ROTATE_90:
5540                 plane_info->rotation = ROTATION_ANGLE_90;
5541                 break;
5542         case DRM_MODE_ROTATE_180:
5543                 plane_info->rotation = ROTATION_ANGLE_180;
5544                 break;
5545         case DRM_MODE_ROTATE_270:
5546                 plane_info->rotation = ROTATION_ANGLE_270;
5547                 break;
5548         default:
5549                 plane_info->rotation = ROTATION_ANGLE_0;
5550                 break;
5551         }
5552
5553         plane_info->visible = true;
5554         plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5555
5556         plane_info->layer_index = 0;
5557
5558         ret = fill_plane_color_attributes(plane_state, plane_info->format,
5559                                           &plane_info->color_space);
5560         if (ret)
5561                 return ret;
5562
5563         ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5564                                            plane_info->rotation, tiling_flags,
5565                                            &plane_info->tiling_info,
5566                                            &plane_info->plane_size,
5567                                            &plane_info->dcc, address, tmz_surface,
5568                                            force_disable_dcc);
5569         if (ret)
5570                 return ret;
5571
5572         fill_blending_from_plane_state(
5573                 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5574                 &plane_info->global_alpha, &plane_info->global_alpha_value);
5575
5576         return 0;
5577 }
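/*
 * Fill a dc_plane_state for atomic check/commit: scaling rectangles
 * first, then format/tiling/address (with DCC force-disabled on Raven
 * while suspending), and finally the input transfer function via the
 * color-management helper.
 */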
5578
5579 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5580                                     struct dc_plane_state *dc_plane_state,
5581                                     struct drm_plane_state *plane_state,
5582                                     struct drm_crtc_state *crtc_state)
5583 {
5584         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5585         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5586         struct dc_scaling_info scaling_info;
5587         struct dc_plane_info plane_info;
5588         int ret;
5589         bool force_disable_dcc = false;
5590
5591         ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5592         if (ret)
5593                 return ret;
5594
5595         dc_plane_state->src_rect = scaling_info.src_rect;
5596         dc_plane_state->dst_rect = scaling_info.dst_rect;
5597         dc_plane_state->clip_rect = scaling_info.clip_rect;
5598         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5599
5600         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5601         ret = fill_dc_plane_info_and_addr(adev, plane_state,
5602                                           afb->tiling_flags,
5603                                           &plane_info,
5604                                           &dc_plane_state->address,
5605                                           afb->tmz_surface,
5606                                           force_disable_dcc);
5607         if (ret)
5608                 return ret;
5609
5610         dc_plane_state->format = plane_info.format;
5611         dc_plane_state->color_space = plane_info.color_space;
5613         dc_plane_state->plane_size = plane_info.plane_size;
5614         dc_plane_state->rotation = plane_info.rotation;
5615         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5616         dc_plane_state->stereo_format = plane_info.stereo_format;
5617         dc_plane_state->tiling_info = plane_info.tiling_info;
5618         dc_plane_state->visible = plane_info.visible;
5619         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5620         dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5621         dc_plane_state->global_alpha = plane_info.global_alpha;
5622         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5623         dc_plane_state->dcc = plane_info.dcc;
5624         dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */
5625         dc_plane_state->flip_int_enabled = true;
5626
5627         /*
5628          * Always set input transfer function, since plane state is refreshed
5629          * every time.
5630          */
5631         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5632         if (ret)
5633                 return ret;
5634
5635         return 0;
5636 }
5637
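/*
 * Compute the stream src/dst rectangles for the connector's scaling mode.
 * With RMX_ASPECT, one destination axis shrinks to preserve the source
 * aspect ratio; e.g. a 1280x1024 source on a 1920x1080 timing gives
 * dst 1350x1080 (1280 * 1080 / 1024), centered at x = 285. Underscan
 * borders are then subtracted from the result.
 */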
5638 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5639                                            const struct dm_connector_state *dm_state,
5640                                            struct dc_stream_state *stream)
5641 {
5642         enum amdgpu_rmx_type rmx_type;
5643
5644         struct rect src = { 0 }; /* viewport in composition space */
5645         struct rect dst = { 0 }; /* stream addressable area */
5646
5647         /* no mode. nothing to be done */
5648         if (!mode)
5649                 return;
5650
5651         /* Full screen scaling by default */
5652         src.width = mode->hdisplay;
5653         src.height = mode->vdisplay;
5654         dst.width = stream->timing.h_addressable;
5655         dst.height = stream->timing.v_addressable;
5656
5657         if (dm_state) {
5658                 rmx_type = dm_state->scaling;
5659                 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5660                         if (src.width * dst.height <
5661                                         src.height * dst.width) {
5662                                 /* height needs less upscaling/more downscaling */
5663                                 dst.width = src.width *
5664                                                 dst.height / src.height;
5665                         } else {
5666                                 /* width needs less upscaling/more downscaling */
5667                                 dst.height = src.height *
5668                                                 dst.width / src.width;
5669                         }
5670                 } else if (rmx_type == RMX_CENTER) {
5671                         dst = src;
5672                 }
5673
5674                 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5675                 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5676
5677                 if (dm_state->underscan_enable) {
5678                         dst.x += dm_state->underscan_hborder / 2;
5679                         dst.y += dm_state->underscan_vborder / 2;
5680                         dst.width -= dm_state->underscan_hborder;
5681                         dst.height -= dm_state->underscan_vborder;
5682                 }
5683         }
5684
5685         stream->src = src;
5686         stream->dst = dst;
5687
5688         DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5689                       dst.x, dst.y, dst.width, dst.height);
5691 }
5692
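/*
 * Pick the effective bpc for the sink: YCbCr 4:2:0 depth comes from the
 * HDMI HF-VSDB deep-color bits, otherwise from the EDID (default 8).
 * A positive requested_bpc caps the result and rounds it down to an even
 * value, e.g. a 12 bpc panel with requested_bpc = 11 ends up at 10 bpc.
 */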
5693 static enum dc_color_depth
5694 convert_color_depth_from_display_info(const struct drm_connector *connector,
5695                                       bool is_y420, int requested_bpc)
5696 {
5697         uint8_t bpc;
5698
5699         if (is_y420) {
5700                 bpc = 8;
5701
5702                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5703                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5704                         bpc = 16;
5705                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5706                         bpc = 12;
5707                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5708                         bpc = 10;
5709         } else {
5710                 bpc = (uint8_t)connector->display_info.bpc;
5711                 /* Assume 8 bpc by default if no bpc is specified. */
5712                 bpc = bpc ? bpc : 8;
5713         }
5714
5715         if (requested_bpc > 0) {
5716                 /*
5717                  * Cap display bpc based on the user requested value.
5718                  *
5719                  * The value of state->max_bpc may not be correctly updated
5720                  * depending on when the connector gets added to the state
5721                  * or if this was called outside of atomic check, so it
5722                  * can't be used directly.
5723                  */
5724                 bpc = min_t(u8, bpc, requested_bpc);
5725
5726                 /* Round down to the nearest even number. */
5727                 bpc = bpc - (bpc & 1);
5728         }
5729
5730         switch (bpc) {
5731         case 0:
5732                 /*
5733                  * Temporary workaround: DRM doesn't parse color depth for
5734                  * EDID revisions before 1.4.
5735                  * TODO: Fix EDID parsing
5736                  */
5737                 return COLOR_DEPTH_888;
5738         case 6:
5739                 return COLOR_DEPTH_666;
5740         case 8:
5741                 return COLOR_DEPTH_888;
5742         case 10:
5743                 return COLOR_DEPTH_101010;
5744         case 12:
5745                 return COLOR_DEPTH_121212;
5746         case 14:
5747                 return COLOR_DEPTH_141414;
5748         case 16:
5749                 return COLOR_DEPTH_161616;
5750         default:
5751                 return COLOR_DEPTH_UNDEFINED;
5752         }
5753 }
5754
5755 static enum dc_aspect_ratio
5756 get_aspect_ratio(const struct drm_display_mode *mode_in)
5757 {
5758         /* 1-1 mapping, since both enums follow the HDMI spec. */
5759         return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5760 }
5761
5762 static enum dc_color_space
5763 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5764 {
5765         enum dc_color_space color_space = COLOR_SPACE_SRGB;
5766
5767         switch (dc_crtc_timing->pixel_encoding) {
5768         case PIXEL_ENCODING_YCBCR422:
5769         case PIXEL_ENCODING_YCBCR444:
5770         case PIXEL_ENCODING_YCBCR420:
5771         {
5772                 /*
5773                  * 27030 kHz is the separation point between HDTV and SDTV
5774                  * according to the HDMI spec; use YCbCr709 above it and
5775                  * YCbCr601 below it.
5776                  */
5777                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5778                         if (dc_crtc_timing->flags.Y_ONLY)
5779                                 color_space =
5780                                         COLOR_SPACE_YCBCR709_LIMITED;
5781                         else
5782                                 color_space = COLOR_SPACE_YCBCR709;
5783                 } else {
5784                         if (dc_crtc_timing->flags.Y_ONLY)
5785                                 color_space =
5786                                         COLOR_SPACE_YCBCR601_LIMITED;
5787                         else
5788                                 color_space = COLOR_SPACE_YCBCR601;
5789                 }
5790
5791         }
5792         break;
5793         case PIXEL_ENCODING_RGB:
5794                 color_space = COLOR_SPACE_SRGB;
5795                 break;
5796
5797         default:
5798                 WARN_ON(1);
5799                 break;
5800         }
5801
5802         return color_space;
5803 }
5804
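/*
 * Step the color depth down until the pixel clock, scaled per the HDMI
 * spec, fits the sink's max TMDS clock. The factors are bpp ratios
 * relative to 24 bpp (30/24, 36/24, 48/24); YCbCr 4:2:0 halves the
 * clock before the comparison.
 */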
5805 static bool adjust_colour_depth_from_display_info(
5806         struct dc_crtc_timing *timing_out,
5807         const struct drm_display_info *info)
5808 {
5809         enum dc_color_depth depth = timing_out->display_color_depth;
5810         int normalized_clk;
5811         do {
5812                 normalized_clk = timing_out->pix_clk_100hz / 10;
5813                 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5814                 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5815                         normalized_clk /= 2;
5816                 /* Adjusting pix clock following on HDMI spec based on colour depth */
5817                 switch (depth) {
5818                 case COLOR_DEPTH_888:
5819                         break;
5820                 case COLOR_DEPTH_101010:
5821                         normalized_clk = (normalized_clk * 30) / 24;
5822                         break;
5823                 case COLOR_DEPTH_121212:
5824                         normalized_clk = (normalized_clk * 36) / 24;
5825                         break;
5826                 case COLOR_DEPTH_161616:
5827                         normalized_clk = (normalized_clk * 48) / 24;
5828                         break;
5829                 default:
5830                         /* The above depths are the only ones valid for HDMI. */
5831                         return false;
5832                 }
5833                 if (normalized_clk <= info->max_tmds_clock) {
5834                         timing_out->display_color_depth = depth;
5835                         return true;
5836                 }
5837         } while (--depth > COLOR_DEPTH_666);
5838         return false;
5839 }
5840
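/*
 * Translate a drm_display_mode plus connector info into dc_crtc_timing:
 * pixel encoding, color depth, VIC and sync polarities (inherited from
 * old_stream when only the refresh rate changes), and the full h/v
 * timing. FreeSync video modes read the base fields rather than the
 * crtc_* fields so the original base timing is kept.
 */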
5841 static void fill_stream_properties_from_drm_display_mode(
5842         struct dc_stream_state *stream,
5843         const struct drm_display_mode *mode_in,
5844         const struct drm_connector *connector,
5845         const struct drm_connector_state *connector_state,
5846         const struct dc_stream_state *old_stream,
5847         int requested_bpc)
5848 {
5849         struct dc_crtc_timing *timing_out = &stream->timing;
5850         const struct drm_display_info *info = &connector->display_info;
5851         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5852         struct hdmi_vendor_infoframe hv_frame;
5853         struct hdmi_avi_infoframe avi_frame;
5854
5855         memset(&hv_frame, 0, sizeof(hv_frame));
5856         memset(&avi_frame, 0, sizeof(avi_frame));
5857
5858         timing_out->h_border_left = 0;
5859         timing_out->h_border_right = 0;
5860         timing_out->v_border_top = 0;
5861         timing_out->v_border_bottom = 0;
5862         /* TODO: un-hardcode */
5863         if (drm_mode_is_420_only(info, mode_in)
5864                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5865                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5866         else if (drm_mode_is_420_also(info, mode_in)
5867                         && aconnector->force_yuv420_output)
5868                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5869         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5870                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5871                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5872         else
5873                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5874
5875         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5876         timing_out->display_color_depth = convert_color_depth_from_display_info(
5877                 connector,
5878                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5879                 requested_bpc);
5880         timing_out->scan_type = SCANNING_TYPE_NODATA;
5881         timing_out->hdmi_vic = 0;
5882
5883         if (old_stream) {
5884                 timing_out->vic = old_stream->timing.vic;
5885                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5886                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5887         } else {
5888                 timing_out->vic = drm_match_cea_mode(mode_in);
5889                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5890                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5891                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5892                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5893         }
5894
5895         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5896                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5897                 timing_out->vic = avi_frame.video_code;
5898                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5899                 timing_out->hdmi_vic = hv_frame.vic;
5900         }
5901
5902         if (is_freesync_video_mode(mode_in, aconnector)) {
5903                 timing_out->h_addressable = mode_in->hdisplay;
5904                 timing_out->h_total = mode_in->htotal;
5905                 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5906                 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5907                 timing_out->v_total = mode_in->vtotal;
5908                 timing_out->v_addressable = mode_in->vdisplay;
5909                 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5910                 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5911                 timing_out->pix_clk_100hz = mode_in->clock * 10;
5912         } else {
5913                 timing_out->h_addressable = mode_in->crtc_hdisplay;
5914                 timing_out->h_total = mode_in->crtc_htotal;
5915                 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5916                 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5917                 timing_out->v_total = mode_in->crtc_vtotal;
5918                 timing_out->v_addressable = mode_in->crtc_vdisplay;
5919                 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5920                 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5921                 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5922         }
5923
5924         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5925
5926         stream->output_color_space = get_output_color_space(timing_out);
5927
5928         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5929         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5930         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5931                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5932                     drm_mode_is_420_also(info, mode_in) &&
5933                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5934                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5935                         adjust_colour_depth_from_display_info(timing_out, info);
5936                 }
5937         }
5938 }
5939
5940 static void fill_audio_info(struct audio_info *audio_info,
5941                             const struct drm_connector *drm_connector,
5942                             const struct dc_sink *dc_sink)
5943 {
5944         int i = 0;
5945         int cea_revision = 0;
5946         const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5947
5948         audio_info->manufacture_id = edid_caps->manufacturer_id;
5949         audio_info->product_id = edid_caps->product_id;
5950
5951         cea_revision = drm_connector->display_info.cea_rev;
5952
5953         strscpy(audio_info->display_name,
5954                 edid_caps->display_name,
5955                 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5956
5957         if (cea_revision >= 3) {
5958                 audio_info->mode_count = edid_caps->audio_mode_count;
5959
5960                 for (i = 0; i < audio_info->mode_count; ++i) {
5961                         audio_info->modes[i].format_code =
5962                                         (enum audio_format_code)
5963                                         (edid_caps->audio_modes[i].format_code);
5964                         audio_info->modes[i].channel_count =
5965                                         edid_caps->audio_modes[i].channel_count;
5966                         audio_info->modes[i].sample_rates.all =
5967                                         edid_caps->audio_modes[i].sample_rate;
5968                         audio_info->modes[i].sample_size =
5969                                         edid_caps->audio_modes[i].sample_size;
5970                 }
5971         }
5972
5973         audio_info->flags.all = edid_caps->speaker_flags;
5974
5975         /* TODO: We only check the progressive mode; check the interlaced mode too */
5976         if (drm_connector->latency_present[0]) {
5977                 audio_info->video_latency = drm_connector->video_latency[0];
5978                 audio_info->audio_latency = drm_connector->audio_latency[0];
5979         }
5980
5981         /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5982
5983 }
5984
5985 static void
5986 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5987                                       struct drm_display_mode *dst_mode)
5988 {
5989         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5990         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5991         dst_mode->crtc_clock = src_mode->crtc_clock;
5992         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5993         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5994         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5995         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5996         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5997         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5998         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5999         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
6000         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
6001         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
6002         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
6003 }
6004
6005 static void
6006 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
6007                                         const struct drm_display_mode *native_mode,
6008                                         bool scale_enabled)
6009 {
6010         if (scale_enabled) {
6011                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6012         } else if (native_mode->clock == drm_mode->clock &&
6013                         native_mode->htotal == drm_mode->htotal &&
6014                         native_mode->vtotal == drm_mode->vtotal) {
6015                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
6016         } else {
6017                 /* no scaling and no amdgpu-inserted mode, nothing to patch */
6018         }
6019 }
6020
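/*
 * Create a virtual sink on the connector's link so stream construction
 * can proceed when the connector has no dc_sink attached.
 */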
6021 static struct dc_sink *
6022 create_fake_sink(struct amdgpu_dm_connector *aconnector)
6023 {
6024         struct dc_sink_init_data sink_init_data = { 0 };
6025         struct dc_sink *sink = NULL;
6026         sink_init_data.link = aconnector->dc_link;
6027         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
6028
6029         sink = dc_sink_create(&sink_init_data);
6030         if (!sink) {
6031                 DRM_ERROR("Failed to create sink!\n");
6032                 return NULL;
6033         }
6034         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
6035
6036         return sink;
6037 }
6038
6039 static void set_multisync_trigger_params(
6040                 struct dc_stream_state *stream)
6041 {
6042         struct dc_stream_state *master = NULL;
6043
6044         if (stream->triggered_crtc_reset.enabled) {
6045                 master = stream->triggered_crtc_reset.event_source;
6046                 stream->triggered_crtc_reset.event =
6047                         master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6048                         CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6049                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6050         }
6051 }
6052
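/*
 * Elect the stream with the highest refresh rate as the multisync
 * master. Refresh is pix_clk / (h_total * v_total); in 100 Hz units
 * that is (pix_clk_100hz * 100) / (h_total * v_total), e.g.
 * 1485000 * 100 / (2200 * 1125) = 60 for a 1080p60 CEA timing.
 */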
6053 static void set_master_stream(struct dc_stream_state *stream_set[],
6054                               int stream_count)
6055 {
6056         int j, highest_rfr = 0, master_stream = 0;
6057
6058         for (j = 0;  j < stream_count; j++) {
6059                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6060                         int refresh_rate = 0;
6061
6062                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz * 100) /
6063                                 (stream_set[j]->timing.h_total * stream_set[j]->timing.v_total);
6064                         if (refresh_rate > highest_rfr) {
6065                                 highest_rfr = refresh_rate;
6066                                 master_stream = j;
6067                         }
6068                 }
6069         }
6070         for (j = 0;  j < stream_count; j++) {
6071                 if (stream_set[j])
6072                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6073         }
6074 }
6075
6076 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6077 {
6078         int i = 0;
6079         struct dc_stream_state *stream;
6080
6081         if (context->stream_count < 2)
6082                 return;
6083         for (i = 0; i < context->stream_count ; i++) {
6084                 if (!context->streams[i])
6085                         continue;
6086                 /*
6087                  * TODO: add a function to read AMD VSDB bits and set
6088                  * crtc_sync_master.multi_sync_enabled flag
6089                  * For now it's set to false
6090                  */
6091         }
6092
6093         set_master_stream(context->streams, context->stream_count);
6094
6095         for (i = 0; i < context->stream_count ; i++) {
6096                 stream = context->streams[i];
6097
6098                 if (!stream)
6099                         continue;
6100
6101                 set_multisync_trigger_params(stream);
6102         }
6103 }
6104
6105 #if defined(CONFIG_DRM_AMD_DC_DCN)
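/*
 * Query the sink's DSC decoder capabilities from DPCD. Only DP/eDP
 * signals are considered, and only for native sinks or DP-to-HDMI
 * converters; everything else leaves dsc_caps->is_dsc_supported false.
 */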
6106 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6107                                                         struct dc_sink *sink, struct dc_stream_state *stream,
6108                                                         struct dsc_dec_dpcd_caps *dsc_caps)
6109 {
6110         stream->timing.flags.DSC = 0;
6111         dsc_caps->is_dsc_supported = false;
6112
6113         if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6114                 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6115                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6116                         sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6117                         dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6118                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6119                                 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6120                                 dsc_caps);
6121         }
6122 }
6123
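/*
 * eDP DSC policy: compute the DSC bandwidth range for the panel's bpp
 * limits and, if even the maximum compressed bandwidth fits the link,
 * force the target bpp to the panel maximum; otherwise fall back to a
 * config computed against the raw link bandwidth.
 */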
6124 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6125                                     struct dc_sink *sink, struct dc_stream_state *stream,
6126                                     struct dsc_dec_dpcd_caps *dsc_caps,
6127                                     uint32_t max_dsc_target_bpp_limit_override)
6128 {
6129         const struct dc_link_settings *verified_link_cap = NULL;
6130         uint32_t link_bw_in_kbps;
6131         uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6132         struct dc *dc = sink->ctx->dc;
6133         struct dc_dsc_bw_range bw_range = {0};
6134         struct dc_dsc_config dsc_cfg = {0};
6135
6136         verified_link_cap = dc_link_get_link_cap(stream->link);
6137         link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6138         edp_min_bpp_x16 = 8 * 16;
6139         edp_max_bpp_x16 = 8 * 16;
6140
6141         if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6142                 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6143
6144         if (edp_max_bpp_x16 < edp_min_bpp_x16)
6145                 edp_min_bpp_x16 = edp_max_bpp_x16;
6146
6147         if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6148                                 dc->debug.dsc_min_slice_height_override,
6149                                 edp_min_bpp_x16, edp_max_bpp_x16,
6150                                 dsc_caps,
6151                                 &stream->timing,
6152                                 &bw_range)) {
6153
6154                 if (bw_range.max_kbps < link_bw_in_kbps) {
6155                         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6156                                         dsc_caps,
6157                                         dc->debug.dsc_min_slice_height_override,
6158                                         max_dsc_target_bpp_limit_override,
6159                                         0,
6160                                         &stream->timing,
6161                                         &dsc_cfg)) {
6162                                 stream->timing.dsc_cfg = dsc_cfg;
6163                                 stream->timing.flags.DSC = 1;
6164                                 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6165                         }
6166                         return;
6167                 }
6168         }
6169
6170         if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6171                                 dsc_caps,
6172                                 dc->debug.dsc_min_slice_height_override,
6173                                 max_dsc_target_bpp_limit_override,
6174                                 link_bw_in_kbps,
6175                                 &stream->timing,
6176                                 &dsc_cfg)) {
6177                 stream->timing.dsc_cfg = dsc_cfg;
6178                 stream->timing.flags.DSC = 1;
6179         }
6180 }
6181
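/*
 * Top-level SST DSC policy for a stream: take the eDP path above when
 * applicable, otherwise compute a DSC config against the SST link
 * bandwidth (or the PCON-limited bandwidth for DP-to-HDMI converters).
 * debugfs overrides for force-enable, slice counts and bpp win last.
 */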
6182 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6183                                                                                 struct dc_sink *sink, struct dc_stream_state *stream,
6184                                                                                 struct dsc_dec_dpcd_caps *dsc_caps)
6185 {
6186         struct drm_connector *drm_connector = &aconnector->base;
6187         uint32_t link_bandwidth_kbps;
6188         uint32_t max_dsc_target_bpp_limit_override = 0;
6189         struct dc *dc = sink->ctx->dc;
6190         uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6191         uint32_t dsc_max_supported_bw_in_kbps;
6192
6193         link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6194                                                         dc_link_get_link_cap(aconnector->dc_link));
6195
6196         if (stream->link && stream->link->local_sink)
6197                 max_dsc_target_bpp_limit_override =
6198                         stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6199
6200         /* Set DSC policy according to dsc_clock_en */
6201         dc_dsc_policy_set_enable_dsc_when_not_needed(
6202                 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6203
6204         if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6205             dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6206
6207                 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6208
6209         } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6210                 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6211                         if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6212                                                 dsc_caps,
6213                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6214                                                 max_dsc_target_bpp_limit_override,
6215                                                 link_bandwidth_kbps,
6216                                                 &stream->timing,
6217                                                 &stream->timing.dsc_cfg)) {
6218                                 stream->timing.flags.DSC = 1;
6219                                 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6220                                                                  __func__, drm_connector->name);
6221                         }
6222                 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6223                         timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6224                         max_supported_bw_in_kbps = link_bandwidth_kbps;
6225                         dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6226
6227                         if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6228                                         max_supported_bw_in_kbps > 0 &&
6229                                         dsc_max_supported_bw_in_kbps > 0)
6230                                 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6231                                                 dsc_caps,
6232                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6233                                                 max_dsc_target_bpp_limit_override,
6234                                                 dsc_max_supported_bw_in_kbps,
6235                                                 &stream->timing,
6236                                                 &stream->timing.dsc_cfg)) {
6237                                         stream->timing.flags.DSC = 1;
6238                                         DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6239                                                                          __func__, drm_connector->name);
6240                                 }
6241                 }
6242         }
6243
6244         /* Overwrite the stream flag if DSC is enabled through debugfs */
6245         if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6246                 stream->timing.flags.DSC = 1;
6247
6248         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6249                 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6250
6251         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6252                 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6253
6254         if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6255                 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6256 }
6257 #endif /* CONFIG_DRM_AMD_DC_DCN */
6258
6259 /**
6260  * DOC: FreeSync Video
6261  *
6262  * When a userspace application wants to play a video, the content follows a
6263  * standard format definition that usually specifies the FPS for that format.
6264  * The list below illustrates some video formats and their expected FPS:
6266  *
6267  * - TV/NTSC (23.976 FPS)
6268  * - Cinema (24 FPS)
6269  * - TV/PAL (25 FPS)
6270  * - TV/NTSC (29.97 FPS)
6271  * - TV/NTSC (30 FPS)
6272  * - Cinema HFR (48 FPS)
6273  * - TV/PAL (50 FPS)
6274  * - Commonly used (60 FPS)
6275  * - Multiples of 24 (48,72,96,120 FPS)
6276  *
6277  * The list of standard video formats is not huge and can be added to the
6278  * connector modeset list beforehand. With that, userspace can leverage
6279  * FreeSync to extend the front porch in order to attain the target refresh
6280  * rate. Such a switch will happen seamlessly, without screen blanking or
6281  * reprogramming of the output in any other way. If the userspace requests a
6282  * modesetting change compatible with FreeSync modes that only differ in the
6283  * refresh rate, DC will skip the full update and avoid blink during the
6284  * transition. For example, the video player can change the modesetting from
6285  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6286  * causing any display blink. This same concept can be applied to a mode
6287  * setting change.
6288  */
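/*
 * Return the mode with the highest refresh rate at the preferred mode's
 * resolution, caching it in freesync_vid_base on first use; this is the
 * base timing from which FreeSync video modes are derived.
 */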
6289 static struct drm_display_mode *
6290 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6291                           bool use_probed_modes)
6292 {
6293         struct drm_display_mode *m, *m_pref = NULL;
6294         u16 current_refresh, highest_refresh;
6295         struct list_head *list_head = use_probed_modes ?
6296                                                     &aconnector->base.probed_modes :
6297                                                     &aconnector->base.modes;
6298
6299         if (aconnector->freesync_vid_base.clock != 0)
6300                 return &aconnector->freesync_vid_base;
6301
6302         /* Find the preferred mode */
6303         list_for_each_entry(m, list_head, head) {
6304                 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6305                         m_pref = m;
6306                         break;
6307                 }
6308         }
6309
6310         if (!m_pref) {
6311                 /* Probably an EDID with no preferred mode. Fall back to the first entry. */
6312                 m_pref = list_first_entry_or_null(
6313                         &aconnector->base.modes, struct drm_display_mode, head);
6314                 if (!m_pref) {
6315                         DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6316                         return NULL;
6317                 }
6318         }
6319
6320         highest_refresh = drm_mode_vrefresh(m_pref);
6321
6322         /*
6323          * Find the mode with highest refresh rate with same resolution.
6324          * For some monitors, preferred mode is not the mode with highest
6325          * supported refresh rate.
6326          */
6327         list_for_each_entry(m, list_head, head) {
6328                 current_refresh  = drm_mode_vrefresh(m);
6329
6330                 if (m->hdisplay == m_pref->hdisplay &&
6331                     m->vdisplay == m_pref->vdisplay &&
6332                     highest_refresh < current_refresh) {
6333                         highest_refresh = current_refresh;
6334                         m_pref = m;
6335                 }
6336         }
6337
6338         drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
6339         return m_pref;
6340 }
6341
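/*
 * A FreeSync video mode matches the cached base mode in everything but
 * vtotal: the vsync position shifts by exactly the vtotal delta, i.e.
 * only the vertical front porch differs.
 */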
6342 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6343                                    struct amdgpu_dm_connector *aconnector)
6344 {
6345         struct drm_display_mode *high_mode;
6346         int timing_diff;
6347
6348         high_mode = get_highest_refresh_rate_mode(aconnector, false);
6349         if (!high_mode || !mode)
6350                 return false;
6351
6352         timing_diff = high_mode->vtotal - mode->vtotal;
6353
6354         if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6355             high_mode->hdisplay != mode->hdisplay ||
6356             high_mode->vdisplay != mode->vdisplay ||
6357             high_mode->hsync_start != mode->hsync_start ||
6358             high_mode->hsync_end != mode->hsync_end ||
6359             high_mode->htotal != mode->htotal ||
6360             high_mode->hskew != mode->hskew ||
6361             high_mode->vscan != mode->vscan ||
6362             high_mode->vsync_start - mode->vsync_start != timing_diff ||
6363             high_mode->vsync_end - mode->vsync_end != timing_diff)
6364                 return false;
6365         else
6366                 return true;
6367 }
6368
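/*
 * Core stream construction: resolve the sink (real or fake), choose the
 * timing (FreeSync video base vs. preferred/native mode), fill stream
 * properties, apply the DSC policy and scaling settings, and build the
 * audio/VSC/PSR state.
 */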
6369 static struct dc_stream_state *
6370 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6371                        const struct drm_display_mode *drm_mode,
6372                        const struct dm_connector_state *dm_state,
6373                        const struct dc_stream_state *old_stream,
6374                        int requested_bpc)
6375 {
6376         struct drm_display_mode *preferred_mode = NULL;
6377         struct drm_connector *drm_connector;
6378         const struct drm_connector_state *con_state =
6379                 dm_state ? &dm_state->base : NULL;
6380         struct dc_stream_state *stream = NULL;
6381         struct drm_display_mode mode = *drm_mode;
6382         struct drm_display_mode saved_mode;
6383         struct drm_display_mode *freesync_mode = NULL;
6384         bool native_mode_found = false;
6385         bool recalculate_timing = false;
6386         bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6387         int mode_refresh;
6388         int preferred_refresh = 0;
6389 #if defined(CONFIG_DRM_AMD_DC_DCN)
6390         struct dsc_dec_dpcd_caps dsc_caps;
6391 #endif
6392         struct dc_sink *sink = NULL;
6393
6394         memset(&saved_mode, 0, sizeof(saved_mode));
6395
6396         if (aconnector == NULL) {
6397                 DRM_ERROR("aconnector is NULL!\n");
6398                 return stream;
6399         }
6400
6401         drm_connector = &aconnector->base;
6402
6403         if (!aconnector->dc_sink) {
6404                 sink = create_fake_sink(aconnector);
6405                 if (!sink)
6406                         return stream;
6407         } else {
6408                 sink = aconnector->dc_sink;
6409                 dc_sink_retain(sink);
6410         }
6411
6412         stream = dc_create_stream_for_sink(sink);
6413
6414         if (stream == NULL) {
6415                 DRM_ERROR("Failed to create stream for sink!\n");
6416                 goto finish;
6417         }
6418
6419         stream->dm_stream_context = aconnector;
6420
6421         stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6422                 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6423
6424         list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6425                 /* Search for preferred mode */
6426                 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6427                         native_mode_found = true;
6428                         break;
6429                 }
6430         }
6431         if (!native_mode_found)
6432                 preferred_mode = list_first_entry_or_null(
6433                                 &aconnector->base.modes,
6434                                 struct drm_display_mode,
6435                                 head);
6436
6437         mode_refresh = drm_mode_vrefresh(&mode);
6438
6439         if (preferred_mode == NULL) {
6440                 /*
6441                  * This may not be an error; the use case is when we have no
6442                  * usermode calls to reset and set mode upon hotplug. In this
6443                  * case, we call set mode ourselves to restore the previous mode,
6444                  * and the mode list may not be filled in yet.
6445                  */
6446                 DRM_DEBUG_DRIVER("No preferred mode found\n");
6447         } else {
6448                 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6449                 if (recalculate_timing) {
6450                         freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6451                         drm_mode_copy(&saved_mode, &mode);
6452                         drm_mode_copy(&mode, freesync_mode);
6453                 } else {
6454                         decide_crtc_timing_for_drm_display_mode(
6455                                 &mode, preferred_mode, scale);
6456
6457                         preferred_refresh = drm_mode_vrefresh(preferred_mode);
6458                 }
6459         }
6460
6461         if (recalculate_timing)
6462                 drm_mode_set_crtcinfo(&saved_mode, 0);
6463         else if (!dm_state)
6464                 drm_mode_set_crtcinfo(&mode, 0);
6465
6466         /*
6467          * If scaling is enabled and the refresh rate didn't change,
6468          * copy the VIC and polarities from the old timings.
6469          */
6470         if (!scale || mode_refresh != preferred_refresh)
6471                 fill_stream_properties_from_drm_display_mode(
6472                         stream, &mode, &aconnector->base, con_state, NULL,
6473                         requested_bpc);
6474         else
6475                 fill_stream_properties_from_drm_display_mode(
6476                         stream, &mode, &aconnector->base, con_state, old_stream,
6477                         requested_bpc);
6478
6479 #if defined(CONFIG_DRM_AMD_DC_DCN)
6480         /* SST DSC determination policy */
6481         update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6482         if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6483                 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6484 #endif
6485
6486         update_stream_scaling_settings(&mode, dm_state, stream);
6487
6488         fill_audio_info(
6489                 &stream->audio_info,
6490                 drm_connector,
6491                 sink);
6492
6493         update_stream_signal(stream, sink);
6494
6495         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6496                 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6497
6498         if (stream->link->psr_settings.psr_feature_enabled) {
6499                 /*
6500                  * Decide whether the stream supports VSC SDP colorimetry
6501                  * before building the VSC info packet.
6502                  */
6503                 stream->use_vsc_sdp_for_colorimetry = false;
6504                 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6505                         stream->use_vsc_sdp_for_colorimetry =
6506                                 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6507                 } else {
6508                         if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6509                                 stream->use_vsc_sdp_for_colorimetry = true;
6510                 }
6511                 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
6512                 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6513
6514         }
6515 finish:
6516         dc_sink_release(sink);
6517
6518         return stream;
6519 }
6520
6521 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6522 {
6523         drm_crtc_cleanup(crtc);
6524         kfree(crtc);
6525 }
6526
6527 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6528                                   struct drm_crtc_state *state)
6529 {
6530         struct dm_crtc_state *cur = to_dm_crtc_state(state);
6531
6532         /* TODO Destroy dc_stream objects once the stream object is flattened */
6533         if (cur->stream)
6534                 dc_stream_release(cur->stream);
6535
6536         __drm_atomic_helper_crtc_destroy_state(state);
6537
6540         kfree(state);
6541 }
6542
6543 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6544 {
6545         struct dm_crtc_state *state;
6546
6547         if (crtc->state)
6548                 dm_crtc_destroy_state(crtc, crtc->state);
6549
6550         state = kzalloc(sizeof(*state), GFP_KERNEL);
6551         if (WARN_ON(!state))
6552                 return;
6553
6554         __drm_atomic_helper_crtc_reset(crtc, &state->base);
6555 }
6556
6557 static struct drm_crtc_state *
6558 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6559 {
6560         struct dm_crtc_state *state, *cur;
6561
6562         if (WARN_ON(!crtc->state))
6563                 return NULL;
6564
6565         cur = to_dm_crtc_state(crtc->state);
6566
6567         state = kzalloc(sizeof(*state), GFP_KERNEL);
6568         if (!state)
6569                 return NULL;
6570
6571         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6572
6573         if (cur->stream) {
6574                 state->stream = cur->stream;
6575                 dc_stream_retain(state->stream);
6576         }
6577
6578         state->active_planes = cur->active_planes;
6579         state->vrr_infopacket = cur->vrr_infopacket;
6580         state->abm_level = cur->abm_level;
6581         state->vrr_supported = cur->vrr_supported;
6582         state->freesync_config = cur->freesync_config;
6583         state->cm_has_degamma = cur->cm_has_degamma;
6584         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6585         state->force_dpms_off = cur->force_dpms_off;
6586         /* TODO: Duplicate dc_stream once the stream object is flattened */
6587
6588         return &state->base;
6589 }
6590
6591 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6592 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6593 {
6594         crtc_debugfs_init(crtc);
6595
6596         return 0;
6597 }
6598 #endif
6599
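/*
 * Enable or disable the VUPDATE interrupt for the OTG backing this CRTC.
 * Returns 0 on success, or -EBUSY if DC rejects the interrupt change.
 */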
6600 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6601 {
6602         enum dc_irq_source irq_source;
6603         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6604         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6605         int rc;
6606
6607         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6608
6609         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6610
6611         DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6612                       acrtc->crtc_id, enable ? "en" : "dis", rc);
6613         return rc;
6614 }
6615
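/*
 * Enable or disable the VBLANK interrupt for a CRTC. The VUPDATE interrupt
 * is only needed while VRR is active, so it is toggled alongside VBLANK;
 * any remaining handling is deferred to the vblank control workqueue when
 * one exists.
 */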
6616 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6617 {
6618         enum dc_irq_source irq_source;
6619         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6620         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6621         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6622         struct amdgpu_display_manager *dm = &adev->dm;
6623         struct vblank_control_work *work;
6624         int rc = 0;
6625
6626         if (enable) {
6627                 /* vblank irq on -> Only need vupdate irq in vrr mode */
6628                 if (amdgpu_dm_vrr_active(acrtc_state))
6629                         rc = dm_set_vupdate_irq(crtc, true);
6630         } else {
6631                 /* vblank irq off -> vupdate irq off */
6632                 rc = dm_set_vupdate_irq(crtc, false);
6633         }
6634
6635         if (rc)
6636                 return rc;
6637
6638         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6639
6640         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6641                 return -EBUSY;
6642
6643         if (amdgpu_in_reset(adev))
6644                 return 0;
6645
6646         if (dm->vblank_control_workqueue) {
6647                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6648                 if (!work)
6649                         return -ENOMEM;
6650
6651                 INIT_WORK(&work->work, vblank_control_worker);
6652                 work->dm = dm;
6653                 work->acrtc = acrtc;
6654                 work->enable = enable;
6655
6656                 if (acrtc_state->stream) {
6657                         dc_stream_retain(acrtc_state->stream);
6658                         work->stream = acrtc_state->stream;
6659                 }
6660
6661                 queue_work(dm->vblank_control_workqueue, &work->work);
6662         }
6663
6664         return 0;
6665 }
6666
6667 static int dm_enable_vblank(struct drm_crtc *crtc)
6668 {
6669         return dm_set_vblank(crtc, true);
6670 }
6671
6672 static void dm_disable_vblank(struct drm_crtc *crtc)
6673 {
6674         dm_set_vblank(crtc, false);
6675 }
6676
6677 /* Only the options currently available to the driver are implemented */
6678 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6679         .reset = dm_crtc_reset_state,
6680         .destroy = amdgpu_dm_crtc_destroy,
6681         .set_config = drm_atomic_helper_set_config,
6682         .page_flip = drm_atomic_helper_page_flip,
6683         .atomic_duplicate_state = dm_crtc_duplicate_state,
6684         .atomic_destroy_state = dm_crtc_destroy_state,
6685         .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6686         .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6687         .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6688         .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6689         .enable_vblank = dm_enable_vblank,
6690         .disable_vblank = dm_disable_vblank,
6691         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6692 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6693         .late_register = amdgpu_dm_crtc_late_register,
6694 #endif
6695 };
6696
6697 static enum drm_connector_status
6698 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6699 {
6700         bool connected;
6701         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6702
6703         /*
6704          * Notes:
6705          * 1. This interface is NOT called in context of HPD irq.
6706          * 2. This interface *is called* in the context of a user-mode ioctl,
6707          * which makes it a bad place for *any* MST-related activity.
6708          */
6709
6710         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6711             !aconnector->fake_enable)
6712                 connected = (aconnector->dc_sink != NULL);
6713         else
6714                 connected = (aconnector->base.force == DRM_FORCE_ON);
6715
6716         update_subconnector_property(aconnector);
6717
6718         return (connected ? connector_status_connected :
6719                         connector_status_disconnected);
6720 }
6721
6722 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6723                                             struct drm_connector_state *connector_state,
6724                                             struct drm_property *property,
6725                                             uint64_t val)
6726 {
6727         struct drm_device *dev = connector->dev;
6728         struct amdgpu_device *adev = drm_to_adev(dev);
6729         struct dm_connector_state *dm_old_state =
6730                 to_dm_connector_state(connector->state);
6731         struct dm_connector_state *dm_new_state =
6732                 to_dm_connector_state(connector_state);
6733
6734         int ret = -EINVAL;
6735
6736         if (property == dev->mode_config.scaling_mode_property) {
6737                 enum amdgpu_rmx_type rmx_type;
6738
6739                 switch (val) {
6740                 case DRM_MODE_SCALE_CENTER:
6741                         rmx_type = RMX_CENTER;
6742                         break;
6743                 case DRM_MODE_SCALE_ASPECT:
6744                         rmx_type = RMX_ASPECT;
6745                         break;
6746                 case DRM_MODE_SCALE_FULLSCREEN:
6747                         rmx_type = RMX_FULL;
6748                         break;
6749                 case DRM_MODE_SCALE_NONE:
6750                 default:
6751                         rmx_type = RMX_OFF;
6752                         break;
6753                 }
6754
6755                 if (dm_old_state->scaling == rmx_type)
6756                         return 0;
6757
6758                 dm_new_state->scaling = rmx_type;
6759                 ret = 0;
6760         } else if (property == adev->mode_info.underscan_hborder_property) {
6761                 dm_new_state->underscan_hborder = val;
6762                 ret = 0;
6763         } else if (property == adev->mode_info.underscan_vborder_property) {
6764                 dm_new_state->underscan_vborder = val;
6765                 ret = 0;
6766         } else if (property == adev->mode_info.underscan_property) {
6767                 dm_new_state->underscan_enable = val;
6768                 ret = 0;
6769         } else if (property == adev->mode_info.abm_level_property) {
6770                 dm_new_state->abm_level = val;
6771                 ret = 0;
6772         }
6773
6774         return ret;
6775 }
6776
6777 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6778                                             const struct drm_connector_state *state,
6779                                             struct drm_property *property,
6780                                             uint64_t *val)
6781 {
6782         struct drm_device *dev = connector->dev;
6783         struct amdgpu_device *adev = drm_to_adev(dev);
6784         struct dm_connector_state *dm_state =
6785                 to_dm_connector_state(state);
6786         int ret = -EINVAL;
6787
6788         if (property == dev->mode_config.scaling_mode_property) {
6789                 switch (dm_state->scaling) {
6790                 case RMX_CENTER:
6791                         *val = DRM_MODE_SCALE_CENTER;
6792                         break;
6793                 case RMX_ASPECT:
6794                         *val = DRM_MODE_SCALE_ASPECT;
6795                         break;
6796                 case RMX_FULL:
6797                         *val = DRM_MODE_SCALE_FULLSCREEN;
6798                         break;
6799                 case RMX_OFF:
6800                 default:
6801                         *val = DRM_MODE_SCALE_NONE;
6802                         break;
6803                 }
6804                 ret = 0;
6805         } else if (property == adev->mode_info.underscan_hborder_property) {
6806                 *val = dm_state->underscan_hborder;
6807                 ret = 0;
6808         } else if (property == adev->mode_info.underscan_vborder_property) {
6809                 *val = dm_state->underscan_vborder;
6810                 ret = 0;
6811         } else if (property == adev->mode_info.underscan_property) {
6812                 *val = dm_state->underscan_enable;
6813                 ret = 0;
6814         } else if (property == adev->mode_info.abm_level_property) {
6815                 *val = dm_state->abm_level;
6816                 ret = 0;
6817         }
6818
6819         return ret;
6820 }
6821
6822 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6823 {
6824         struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6825
6826         drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6827 }
6828
6829 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6830 {
6831         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6832         const struct dc_link *link = aconnector->dc_link;
6833         struct amdgpu_device *adev = drm_to_adev(connector->dev);
6834         struct amdgpu_display_manager *dm = &adev->dm;
6835         int i;
6836
6837         /*
6838          * Only call this if mst_mgr was initialized earlier, since that is
6839          * not done for all connector types.
6840          */
6841         if (aconnector->mst_mgr.dev)
6842                 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6843
6844 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6845         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6846         for (i = 0; i < dm->num_of_edps; i++) {
6847                 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6848                         backlight_device_unregister(dm->backlight_dev[i]);
6849                         dm->backlight_dev[i] = NULL;
6850                 }
6851         }
6852 #endif
6853
6854         if (aconnector->dc_em_sink)
6855                 dc_sink_release(aconnector->dc_em_sink);
6856         aconnector->dc_em_sink = NULL;
6857         if (aconnector->dc_sink)
6858                 dc_sink_release(aconnector->dc_sink);
6859         aconnector->dc_sink = NULL;
6860
6861         drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6862         drm_connector_unregister(connector);
6863         drm_connector_cleanup(connector);
6864         if (aconnector->i2c) {
6865                 i2c_del_adapter(&aconnector->i2c->base);
6866                 kfree(aconnector->i2c);
6867         }
6868         kfree(aconnector->dm_dp_aux.aux.name);
6869
6870         kfree(connector);
6871 }
6872
6873 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6874 {
6875         struct dm_connector_state *state =
6876                 to_dm_connector_state(connector->state);
6877
6878         if (connector->state)
6879                 __drm_atomic_helper_connector_destroy_state(connector->state);
6880
6881         kfree(state);
6882
6883         state = kzalloc(sizeof(*state), GFP_KERNEL);
6884
6885         if (state) {
6886                 state->scaling = RMX_OFF;
6887                 state->underscan_enable = false;
6888                 state->underscan_hborder = 0;
6889                 state->underscan_vborder = 0;
6890                 state->base.max_requested_bpc = 8;
6891                 state->vcpi_slots = 0;
6892                 state->pbn = 0;
6893                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6894                         state->abm_level = amdgpu_dm_abm_level;
6895
6896                 __drm_atomic_helper_connector_reset(connector, &state->base);
6897         }
6898 }
6899
6900 struct drm_connector_state *
6901 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6902 {
6903         struct dm_connector_state *state =
6904                 to_dm_connector_state(connector->state);
6905
6906         struct dm_connector_state *new_state =
6907                         kmemdup(state, sizeof(*state), GFP_KERNEL);
6908
6909         if (!new_state)
6910                 return NULL;
6911
6912         __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6913
6914         new_state->freesync_capable = state->freesync_capable;
6915         new_state->abm_level = state->abm_level;
6916         new_state->scaling = state->scaling;
6917         new_state->underscan_enable = state->underscan_enable;
6918         new_state->underscan_hborder = state->underscan_hborder;
6919         new_state->underscan_vborder = state->underscan_vborder;
6920         new_state->vcpi_slots = state->vcpi_slots;
6921         new_state->pbn = state->pbn;
6922         return &new_state->base;
6923 }
6924
6925 static int
6926 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6927 {
6928         struct amdgpu_dm_connector *amdgpu_dm_connector =
6929                 to_amdgpu_dm_connector(connector);
6930         int r;
6931
6932         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6933             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6934                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6935                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6936                 if (r)
6937                         return r;
6938         }
6939
6940 #if defined(CONFIG_DEBUG_FS)
6941         connector_debugfs_init(amdgpu_dm_connector);
6942 #endif
6943
6944         return 0;
6945 }
6946
6947 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6948         .reset = amdgpu_dm_connector_funcs_reset,
6949         .detect = amdgpu_dm_connector_detect,
6950         .fill_modes = drm_helper_probe_single_connector_modes,
6951         .destroy = amdgpu_dm_connector_destroy,
6952         .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6953         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6954         .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6955         .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6956         .late_register = amdgpu_dm_connector_late_register,
6957         .early_unregister = amdgpu_dm_connector_unregister
6958 };
6959
6960 static int get_modes(struct drm_connector *connector)
6961 {
6962         return amdgpu_dm_connector_get_modes(connector);
6963 }
6964
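/*
 * Create an emulated (virtual signal type) sink from the connector's EDID
 * blob so the link has a sink to drive even when no physical display is
 * attached.
 */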
6965 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6966 {
6967         struct dc_sink_init_data init_params = {
6968                         .link = aconnector->dc_link,
6969                         .sink_signal = SIGNAL_TYPE_VIRTUAL
6970         };
6971         struct edid *edid;
6972
6973         if (!aconnector->base.edid_blob_ptr) {
6974                 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6975                                 aconnector->base.name);
6976
6977                 aconnector->base.force = DRM_FORCE_OFF;
6978                 aconnector->base.override_edid = false;
6979                 return;
6980         }
6981
6982         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6983
6984         aconnector->edid = edid;
6985
6986         aconnector->dc_em_sink = dc_link_add_remote_sink(
6987                 aconnector->dc_link,
6988                 (uint8_t *)edid,
6989                 (edid->extensions + 1) * EDID_LENGTH,
6990                 &init_params);
6991
6992         if (aconnector->base.force == DRM_FORCE_ON) {
6993                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6994                 aconnector->dc_link->local_sink :
6995                 aconnector->dc_em_sink;
6996                 dc_sink_retain(aconnector->dc_sink);
6997         }
6998 }
6999
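/*
 * Handle a forced connector: fake verified DP link caps so an initial
 * modeset can happen, then build an emulated sink from the override EDID.
 */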
7000 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
7001 {
7002         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
7003
7004         /*
7005          * In case of a headless boot with force on for a DP managed connector,
7006          * these settings must be != 0 to get an initial modeset.
7007          */
7008         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
7009                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
7010                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
7011         }
7012
7013
7014         aconnector->base.override_edid = true;
7015         create_eml_sink(aconnector);
7016 }
7017
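/*
 * Create a stream for the sink and validate it against DC. On validation
 * failure, retry with progressively lower color depth (e.g. a 10 bpc
 * request falls back to 8 and then 6 bpc) and, if encoder validation
 * failed, retry once more forcing YCbCr420 encoding.
 */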
7018 struct dc_stream_state *
7019 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
7020                                 const struct drm_display_mode *drm_mode,
7021                                 const struct dm_connector_state *dm_state,
7022                                 const struct dc_stream_state *old_stream)
7023 {
7024         struct drm_connector *connector = &aconnector->base;
7025         struct amdgpu_device *adev = drm_to_adev(connector->dev);
7026         struct dc_stream_state *stream;
7027         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
7028         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
7029         enum dc_status dc_result = DC_OK;
7030
7031         do {
7032                 stream = create_stream_for_sink(aconnector, drm_mode,
7033                                                 dm_state, old_stream,
7034                                                 requested_bpc);
7035                 if (stream == NULL) {
7036                         DRM_ERROR("Failed to create stream for sink!\n");
7037                         break;
7038                 }
7039
7040                 dc_result = dc_validate_stream(adev->dm.dc, stream);
7041
7042                 if (dc_result != DC_OK) {
7043                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7044                                       drm_mode->hdisplay,
7045                                       drm_mode->vdisplay,
7046                                       drm_mode->clock,
7047                                       dc_result,
7048                                       dc_status_to_str(dc_result));
7049
7050                         dc_stream_release(stream);
7051                         stream = NULL;
7052                         requested_bpc -= 2; /* lower bpc to retry validation */
7053                 }
7054
7055         } while (stream == NULL && requested_bpc >= 6);
7056
7057         if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7058                 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7059
7060                 aconnector->force_yuv420_output = true;
7061                 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7062                                                 dm_state, old_stream);
7063                 aconnector->force_yuv420_output = false;
7064         }
7065
7066         return stream;
7067 }
7068
7069 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7070                                    struct drm_display_mode *mode)
7071 {
7072         int result = MODE_ERROR;
7073         struct dc_sink *dc_sink;
7074         /* TODO: Unhardcode stream count */
7075         struct dc_stream_state *stream;
7076         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7077
7078         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7079                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7080                 return result;
7081
7082         /*
7083          * Only run this the first time mode_valid is called, to initialize
7084          * EDID management.
7085          */
7086         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7087                 !aconnector->dc_em_sink)
7088                 handle_edid_mgmt(aconnector);
7089
7090         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7091
7092         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7093                                 aconnector->base.force != DRM_FORCE_ON) {
7094                 DRM_ERROR("dc_sink is NULL!\n");
7095                 goto fail;
7096         }
7097
7098         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7099         if (stream) {
7100                 dc_stream_release(stream);
7101                 result = MODE_OK;
7102         }
7103
7104 fail:
7105         /* TODO: error handling */
7106         return result;
7107 }
7108
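/*
 * Pack the connector's HDR static metadata into a DC infopacket: build the
 * DRM HDR infoframe first, then re-wrap its payload with the header layout
 * the signal type expects (HDMI infoframe vs. DP/eDP SDP).
 */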
7109 static int fill_hdr_info_packet(const struct drm_connector_state *state,
7110                                 struct dc_info_packet *out)
7111 {
7112         struct hdmi_drm_infoframe frame;
7113         unsigned char buf[30]; /* 26 + 4 */
7114         ssize_t len;
7115         int ret, i;
7116
7117         memset(out, 0, sizeof(*out));
7118
7119         if (!state->hdr_output_metadata)
7120                 return 0;
7121
7122         ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7123         if (ret)
7124                 return ret;
7125
7126         len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7127         if (len < 0)
7128                 return (int)len;
7129
7130         /* Static metadata is a fixed 26 bytes + 4 byte header. */
7131         if (len != 30)
7132                 return -EINVAL;
7133
7134         /* Prepare the infopacket for DC. */
7135         switch (state->connector->connector_type) {
7136         case DRM_MODE_CONNECTOR_HDMIA:
7137                 out->hb0 = 0x87; /* type */
7138                 out->hb1 = 0x01; /* version */
7139                 out->hb2 = 0x1A; /* length */
7140                 out->sb[0] = buf[3]; /* checksum */
7141                 i = 1;
7142                 break;
7143
7144         case DRM_MODE_CONNECTOR_DisplayPort:
7145         case DRM_MODE_CONNECTOR_eDP:
7146                 out->hb0 = 0x00; /* sdp id, zero */
7147                 out->hb1 = 0x87; /* type */
7148                 out->hb2 = 0x1D; /* payload len - 1 */
7149                 out->hb3 = (0x13 << 2); /* sdp version */
7150                 out->sb[0] = 0x01; /* version */
7151                 out->sb[1] = 0x1A; /* length */
7152                 i = 2;
7153                 break;
7154
7155         default:
7156                 return -EINVAL;
7157         }
7158
7159         memcpy(&out->sb[i], &buf[4], 26);
7160         out->valid = true;
7161
7162         print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7163                        sizeof(out->sb), false);
7164
7165         return 0;
7166 }
7167
7168 static int
7169 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7170                                  struct drm_atomic_state *state)
7171 {
7172         struct drm_connector_state *new_con_state =
7173                 drm_atomic_get_new_connector_state(state, conn);
7174         struct drm_connector_state *old_con_state =
7175                 drm_atomic_get_old_connector_state(state, conn);
7176         struct drm_crtc *crtc = new_con_state->crtc;
7177         struct drm_crtc_state *new_crtc_state;
7178         int ret;
7179
7180         trace_amdgpu_dm_connector_atomic_check(new_con_state);
7181
7182         if (!crtc)
7183                 return 0;
7184
7185         if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7186                 struct dc_info_packet hdr_infopacket;
7187
7188                 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7189                 if (ret)
7190                         return ret;
7191
7192                 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7193                 if (IS_ERR(new_crtc_state))
7194                         return PTR_ERR(new_crtc_state);
7195
7196                 /*
7197                  * DC considers the stream backends changed if the
7198                  * static metadata changes. Forcing the modeset also
7199                  * gives a simple way for userspace to switch from
7200                  * 8bpc to 10bpc when setting the metadata to enter
7201                  * or exit HDR.
7202                  *
7203                  * Changing the static metadata after it's been
7204                  * set is permissible, however. So only force a
7205                  * modeset if we're entering or exiting HDR.
7206                  */
7207                 new_crtc_state->mode_changed =
7208                         !old_con_state->hdr_output_metadata ||
7209                         !new_con_state->hdr_output_metadata;
7210         }
7211
7212         return 0;
7213 }
7214
7215 static const struct drm_connector_helper_funcs
7216 amdgpu_dm_connector_helper_funcs = {
7217         /*
7218          * If a second, larger display is hotplugged in fbcon mode, its higher
7219          * resolution modes are filtered out by drm_mode_validate_size(), and
7220          * those modes go missing once the user starts lightdm. So the modes
7221          * list must be renewed in the get_modes callback, not just the count.
7222          */
7223         .get_modes = get_modes,
7224         .mode_valid = amdgpu_dm_connector_mode_valid,
7225         .atomic_check = amdgpu_dm_connector_atomic_check,
7226 };
7227
7228 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7229 {
7230 }
7231
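/*
 * Count the enabled, non-cursor planes on a CRTC in the new atomic state;
 * planes without a new plane state are assumed to remain enabled.
 */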
7232 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7233 {
7234         struct drm_atomic_state *state = new_crtc_state->state;
7235         struct drm_plane *plane;
7236         int num_active = 0;
7237
7238         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7239                 struct drm_plane_state *new_plane_state;
7240
7241                 /* Cursor planes are "fake". */
7242                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7243                         continue;
7244
7245                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7246
7247                 if (!new_plane_state) {
7248                         /*
7249                          * The plane is enabled on the CRTC and hasn't changed
7250                          * state. This means that it previously passed
7251                          * validation and is therefore enabled.
7252                          */
7253                         num_active += 1;
7254                         continue;
7255                 }
7256
7257                 /* We need a framebuffer to be considered enabled. */
7258                 num_active += (new_plane_state->fb != NULL);
7259         }
7260
7261         return num_active;
7262 }
7263
7264 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7265                                          struct drm_crtc_state *new_crtc_state)
7266 {
7267         struct dm_crtc_state *dm_new_crtc_state =
7268                 to_dm_crtc_state(new_crtc_state);
7269
7270         dm_new_crtc_state->active_planes = 0;
7271
7272         if (!dm_new_crtc_state->stream)
7273                 return;
7274
7275         dm_new_crtc_state->active_planes =
7276                 count_crtc_active_planes(new_crtc_state);
7277 }
7278
7279 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7280                                        struct drm_atomic_state *state)
7281 {
7282         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7283                                                                           crtc);
7284         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7285         struct dc *dc = adev->dm.dc;
7286         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7287         int ret = -EINVAL;
7288
7289         trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7290
7291         dm_update_crtc_active_planes(crtc, crtc_state);
7292
7293         if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7294                      modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7295                 return ret;
7296         }
7297
7298         /*
7299          * We require the primary plane to be enabled whenever the CRTC is, otherwise
7300          * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7301          * planes are disabled, which is not supported by the hardware. And there is legacy
7302          * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7303          */
7304         if (crtc_state->enable &&
7305             !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7306                 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7307                 return -EINVAL;
7308         }
7309
7310         /* In some use cases, like reset, no stream is attached */
7311         if (!dm_crtc_state->stream)
7312                 return 0;
7313
7314         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7315                 return 0;
7316
7317         DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7318         return ret;
7319 }
7320
7321 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7322                                       const struct drm_display_mode *mode,
7323                                       struct drm_display_mode *adjusted_mode)
7324 {
7325         return true;
7326 }
7327
7328 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7329         .disable = dm_crtc_helper_disable,
7330         .atomic_check = dm_crtc_helper_atomic_check,
7331         .mode_fixup = dm_crtc_helper_mode_fixup,
7332         .get_scanout_position = amdgpu_crtc_get_scanout_position,
7333 };
7334
7335 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7336 {
7337
7338 }
7339
7340 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7341 {
7342         switch (display_color_depth) {
7343         case COLOR_DEPTH_666:
7344                 return 6;
7345         case COLOR_DEPTH_888:
7346                 return 8;
7347         case COLOR_DEPTH_101010:
7348                 return 10;
7349         case COLOR_DEPTH_121212:
7350                 return 12;
7351         case COLOR_DEPTH_141414:
7352                 return 14;
7353         case COLOR_DEPTH_161616:
7354                 return 16;
7355         default:
7356                 break;
7357         }
7358         return 0;
7359 }
7360
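/*
 * For MST connectors, recompute the PBN from the adjusted mode's clock and
 * effective bpp, then reserve the matching number of VCPI slots on the
 * topology manager.
 */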
7361 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7362                                           struct drm_crtc_state *crtc_state,
7363                                           struct drm_connector_state *conn_state)
7364 {
7365         struct drm_atomic_state *state = crtc_state->state;
7366         struct drm_connector *connector = conn_state->connector;
7367         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7368         struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7369         const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7370         struct drm_dp_mst_topology_mgr *mst_mgr;
7371         struct drm_dp_mst_port *mst_port;
7372         enum dc_color_depth color_depth;
7373         int clock, bpp = 0;
7374         bool is_y420 = false;
7375
7376         if (!aconnector->port || !aconnector->dc_sink)
7377                 return 0;
7378
7379         mst_port = aconnector->port;
7380         mst_mgr = &aconnector->mst_port->mst_mgr;
7381
7382         if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7383                 return 0;
7384
7385         if (!state->duplicated) {
7386                 int max_bpc = conn_state->max_requested_bpc;
7387                 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7388                                 aconnector->force_yuv420_output;
7389                 color_depth = convert_color_depth_from_display_info(connector,
7390                                                                     is_y420,
7391                                                                     max_bpc);
7392                 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7393                 clock = adjusted_mode->clock;
7394                 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7395         }
7396         dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7397                                                                            mst_mgr,
7398                                                                            mst_port,
7399                                                                            dm_new_connector_state->pbn,
7400                                                                            dm_mst_get_pbn_divider(aconnector->dc_link));
7401         if (dm_new_connector_state->vcpi_slots < 0) {
7402                 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7403                 return dm_new_connector_state->vcpi_slots;
7404         }
7405         return 0;
7406 }
7407
7408 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7409         .disable = dm_encoder_helper_disable,
7410         .atomic_check = dm_encoder_helper_atomic_check
7411 };
7412
7413 #if defined(CONFIG_DRM_AMD_DC_DCN)
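/*
 * Walk the MST connectors in the atomic state and (re)allocate VCPI slots
 * from the PBN values computed during DSC determination, enabling DSC on
 * the port only for streams whose timing actually uses it.
 */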
7414 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7415                                             struct dc_state *dc_state,
7416                                             struct dsc_mst_fairness_vars *vars)
7417 {
7418         struct dc_stream_state *stream = NULL;
7419         struct drm_connector *connector;
7420         struct drm_connector_state *new_con_state;
7421         struct amdgpu_dm_connector *aconnector;
7422         struct dm_connector_state *dm_conn_state;
7423         int i, j;
7424         int vcpi, pbn_div, pbn, slot_num = 0;
7425
7426         for_each_new_connector_in_state(state, connector, new_con_state, i) {
7427
7428                 aconnector = to_amdgpu_dm_connector(connector);
7429
7430                 if (!aconnector->port)
7431                         continue;
7432
7433                 if (!new_con_state || !new_con_state->crtc)
7434                         continue;
7435
7436                 dm_conn_state = to_dm_connector_state(new_con_state);
7437
7438                 for (j = 0; j < dc_state->stream_count; j++) {
7439                         stream = dc_state->streams[j];
7440                         if (!stream)
7441                                 continue;
7442
7443                         if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7444                                 break;
7445
7446                         stream = NULL;
7447                 }
7448
7449                 if (!stream)
7450                         continue;
7451
7452                 pbn_div = dm_mst_get_pbn_divider(stream->link);
7453                 /* pbn is calculated by compute_mst_dsc_configs_for_state() */
7454                 for (j = 0; j < dc_state->stream_count; j++) {
7455                         if (vars[j].aconnector == aconnector) {
7456                                 pbn = vars[j].pbn;
7457                                 break;
7458                         }
7459                 }
7460
7461                 if (j == dc_state->stream_count)
7462                         continue;
7463
7464                 slot_num = DIV_ROUND_UP(pbn, pbn_div);
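                /*
                 * Illustrative math with hypothetical numbers: pbn = 1000 and
                 * pbn_div = 60 gives DIV_ROUND_UP(1000, 60) = 17 slots.
                 */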
7465
7466                 if (stream->timing.flags.DSC != 1) {
7467                         dm_conn_state->pbn = pbn;
7468                         dm_conn_state->vcpi_slots = slot_num;
7469
7470                         drm_dp_mst_atomic_enable_dsc(state,
7471                                                      aconnector->port,
7472                                                      dm_conn_state->pbn,
7473                                                      0,
7474                                                      false);
7475                         continue;
7476                 }
7477
7478                 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7479                                                     aconnector->port,
7480                                                     pbn, pbn_div,
7481                                                     true);
7482                 if (vcpi < 0)
7483                         return vcpi;
7484
7485                 dm_conn_state->pbn = pbn;
7486                 dm_conn_state->vcpi_slots = vcpi;
7487         }
7488         return 0;
7489 }
7490 #endif
7491
7492 static void dm_drm_plane_reset(struct drm_plane *plane)
7493 {
7494         struct dm_plane_state *amdgpu_state = NULL;
7495
7496         if (plane->state)
7497                 plane->funcs->atomic_destroy_state(plane, plane->state);
7498
7499         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7500         WARN_ON(amdgpu_state == NULL);
7501
7502         if (amdgpu_state)
7503                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7504 }
7505
7506 static struct drm_plane_state *
7507 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7508 {
7509         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7510
7511         old_dm_plane_state = to_dm_plane_state(plane->state);
7512         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7513         if (!dm_plane_state)
7514                 return NULL;
7515
7516         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7517
7518         if (old_dm_plane_state->dc_state) {
7519                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7520                 dc_plane_state_retain(dm_plane_state->dc_state);
7521         }
7522
7523         return &dm_plane_state->base;
7524 }
7525
7526 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7527                                 struct drm_plane_state *state)
7528 {
7529         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7530
7531         if (dm_plane_state->dc_state)
7532                 dc_plane_state_release(dm_plane_state->dc_state);
7533
7534         drm_atomic_helper_plane_destroy_state(plane, state);
7535 }
7536
7537 static const struct drm_plane_funcs dm_plane_funcs = {
7538         .update_plane   = drm_atomic_helper_update_plane,
7539         .disable_plane  = drm_atomic_helper_disable_plane,
7540         .destroy        = drm_primary_helper_destroy,
7541         .reset = dm_drm_plane_reset,
7542         .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7543         .atomic_destroy_state = dm_drm_plane_destroy_state,
7544         .format_mod_supported = dm_plane_format_mod_supported,
7545 };
7546
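/*
 * Pin the framebuffer BO so its GPU address is valid for scanout, bind it
 * into GART, and, for newly created planes only, fill the DC buffer
 * attributes from the pinned address.
 */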
7547 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7548                                       struct drm_plane_state *new_state)
7549 {
7550         struct amdgpu_framebuffer *afb;
7551         struct drm_gem_object *obj;
7552         struct amdgpu_device *adev;
7553         struct amdgpu_bo *rbo;
7554         struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7555         uint32_t domain;
7556         int r;
7557
7558         if (!new_state->fb) {
7559                 DRM_DEBUG_KMS("No FB bound\n");
7560                 return 0;
7561         }
7562
7563         afb = to_amdgpu_framebuffer(new_state->fb);
7564         obj = new_state->fb->obj[0];
7565         rbo = gem_to_amdgpu_bo(obj);
7566         adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7567
7568         r = amdgpu_bo_reserve(rbo, true);
7569         if (r) {
7570                 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7571                 return r;
7572         }
7573
7574         r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
7575         if (r) {
7576                 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
7577                 goto error_unlock;
7578         }
7579
7580         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7581                 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7582         else
7583                 domain = AMDGPU_GEM_DOMAIN_VRAM;
7584
7585         r = amdgpu_bo_pin(rbo, domain);
7586         if (unlikely(r != 0)) {
7587                 if (r != -ERESTARTSYS)
7588                         DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7589                 goto error_unlock;
7590         }
7591
7592         r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7593         if (unlikely(r != 0)) {
7594                 DRM_ERROR("%p bind failed\n", rbo);
7595                 goto error_unpin;
7596         }
7597
7598         amdgpu_bo_unreserve(rbo);
7599
7600         afb->address = amdgpu_bo_gpu_offset(rbo);
7601
7602         amdgpu_bo_ref(rbo);
7603
7604         /*
7605          * We don't do surface updates on planes that have been newly created,
7606          * but we also don't have the afb->address during atomic check.
7607          *
7608          * Fill in buffer attributes depending on the address here, but only on
7609          * newly created planes since they're not being used by DC yet and this
7610          * won't modify global state.
7611          */
7612         dm_plane_state_old = to_dm_plane_state(plane->state);
7613         dm_plane_state_new = to_dm_plane_state(new_state);
7614
7615         if (dm_plane_state_new->dc_state &&
7616             dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7617                 struct dc_plane_state *plane_state =
7618                         dm_plane_state_new->dc_state;
7619                 bool force_disable_dcc = !plane_state->dcc.enable;
7620
7621                 fill_plane_buffer_attributes(
7622                         adev, afb, plane_state->format, plane_state->rotation,
7623                         afb->tiling_flags,
7624                         &plane_state->tiling_info, &plane_state->plane_size,
7625                         &plane_state->dcc, &plane_state->address,
7626                         afb->tmz_surface, force_disable_dcc);
7627         }
7628
7629         return 0;
7630
7631 error_unpin:
7632         amdgpu_bo_unpin(rbo);
7633
7634 error_unlock:
7635         amdgpu_bo_unreserve(rbo);
7636         return r;
7637 }
7638
7639 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7640                                        struct drm_plane_state *old_state)
7641 {
7642         struct amdgpu_bo *rbo;
7643         int r;
7644
7645         if (!old_state->fb)
7646                 return;
7647
7648         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7649         r = amdgpu_bo_reserve(rbo, false);
7650         if (unlikely(r)) {
7651                 DRM_ERROR("failed to reserve rbo before unpin\n");
7652                 return;
7653         }
7654
7655         amdgpu_bo_unpin(rbo);
7656         amdgpu_bo_unreserve(rbo);
7657         amdgpu_bo_unref(&rbo);
7658 }
7659
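/*
 * Validate the plane's viewport against the CRTC and clamp the allowed
 * scaling range to the plane's DC caps before handing off to
 * drm_atomic_helper_check_plane_state().
 */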
7660 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7661                                        struct drm_crtc_state *new_crtc_state)
7662 {
7663         struct drm_framebuffer *fb = state->fb;
7664         int min_downscale, max_upscale;
7665         int min_scale = 0;
7666         int max_scale = INT_MAX;
7667
7668         /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7669         if (fb && state->crtc) {
7670                 /* Validate viewport to cover the case when only the position changes */
7671                 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7672                         int viewport_width = state->crtc_w;
7673                         int viewport_height = state->crtc_h;
7674
7675                         if (state->crtc_x < 0)
7676                                 viewport_width += state->crtc_x;
7677                         else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7678                                 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7679
7680                         if (state->crtc_y < 0)
7681                                 viewport_height += state->crtc_y;
7682                         else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7683                                 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7684
7685                         if (viewport_width < 0 || viewport_height < 0) {
7686                                 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7687                                 return -EINVAL;
7688                         } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7689                                 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7690                                 return -EINVAL;
7691                         } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7692                                 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7693                                 return -EINVAL;
7694                         }
7695
7696                 }
7697
7698                 /* Get min/max allowed scaling factors from plane caps. */
7699                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7700                                              &min_downscale, &max_upscale);
7701                 /*
7702                  * Convert to drm convention: 16.16 fixed point, instead of dc's
7703                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7704                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
7705                  */
7706                 min_scale = (1000 << 16) / max_upscale;
7707                 max_scale = (1000 << 16) / min_downscale;
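                /*
                 * Worked example with hypothetical caps: max_upscale = 16000
                 * (16x in DC's 1.0 == 1000 convention) gives
                 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16
                 * fixed point.
                 */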
7708         }
7709
7710         return drm_atomic_helper_check_plane_state(
7711                 state, new_crtc_state, min_scale, max_scale, true, true);
7712 }
7713
7714 static int dm_plane_atomic_check(struct drm_plane *plane,
7715                                  struct drm_atomic_state *state)
7716 {
7717         struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7718                                                                                  plane);
7719         struct amdgpu_device *adev = drm_to_adev(plane->dev);
7720         struct dc *dc = adev->dm.dc;
7721         struct dm_plane_state *dm_plane_state;
7722         struct dc_scaling_info scaling_info;
7723         struct drm_crtc_state *new_crtc_state;
7724         int ret;
7725
7726         trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7727
7728         dm_plane_state = to_dm_plane_state(new_plane_state);
7729
7730         if (!dm_plane_state->dc_state)
7731                 return 0;
7732
7733         new_crtc_state =
7734                 drm_atomic_get_new_crtc_state(state,
7735                                               new_plane_state->crtc);
7736         if (!new_crtc_state)
7737                 return -EINVAL;
7738
7739         ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7740         if (ret)
7741                 return ret;
7742
7743         ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7744         if (ret)
7745                 return ret;
7746
7747         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7748                 return 0;
7749
7750         return -EINVAL;
7751 }
7752
7753 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7754                                        struct drm_atomic_state *state)
7755 {
7756         /* Only support async updates on cursor planes. */
7757         if (plane->type != DRM_PLANE_TYPE_CURSOR)
7758                 return -EINVAL;
7759
7760         return 0;
7761 }
7762
7763 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7764                                          struct drm_atomic_state *state)
7765 {
7766         struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7767                                                                            plane);
7768         struct drm_plane_state *old_state =
7769                 drm_atomic_get_old_plane_state(state, plane);
7770
7771         trace_amdgpu_dm_atomic_update_cursor(new_state);
7772
7773         swap(plane->state->fb, new_state->fb);
7774
7775         plane->state->src_x = new_state->src_x;
7776         plane->state->src_y = new_state->src_y;
7777         plane->state->src_w = new_state->src_w;
7778         plane->state->src_h = new_state->src_h;
7779         plane->state->crtc_x = new_state->crtc_x;
7780         plane->state->crtc_y = new_state->crtc_y;
7781         plane->state->crtc_w = new_state->crtc_w;
7782         plane->state->crtc_h = new_state->crtc_h;
7783
7784         handle_cursor_update(plane, old_state);
7785 }
7786
7787 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7788         .prepare_fb = dm_plane_helper_prepare_fb,
7789         .cleanup_fb = dm_plane_helper_cleanup_fb,
7790         .atomic_check = dm_plane_atomic_check,
7791         .atomic_async_check = dm_plane_atomic_async_check,
7792         .atomic_async_update = dm_plane_atomic_async_update
7793 };
7794
7795 /*
7796  * TODO: these are currently initialized to rgb formats only.
7797  * For future use cases we should either initialize them dynamically based on
7798  * plane capabilities, or initialize this array to all formats, so the internal
7799  * drm check will succeed, and let DC implement the proper check.
7800  */
7801 static const uint32_t rgb_formats[] = {
7802         DRM_FORMAT_XRGB8888,
7803         DRM_FORMAT_ARGB8888,
7804         DRM_FORMAT_RGBA8888,
7805         DRM_FORMAT_XRGB2101010,
7806         DRM_FORMAT_XBGR2101010,
7807         DRM_FORMAT_ARGB2101010,
7808         DRM_FORMAT_ABGR2101010,
7809         DRM_FORMAT_XRGB16161616,
7810         DRM_FORMAT_XBGR16161616,
7811         DRM_FORMAT_ARGB16161616,
7812         DRM_FORMAT_ABGR16161616,
7813         DRM_FORMAT_XBGR8888,
7814         DRM_FORMAT_ABGR8888,
7815         DRM_FORMAT_RGB565,
7816 };
7817
7818 static const uint32_t overlay_formats[] = {
7819         DRM_FORMAT_XRGB8888,
7820         DRM_FORMAT_ARGB8888,
7821         DRM_FORMAT_RGBA8888,
7822         DRM_FORMAT_XBGR8888,
7823         DRM_FORMAT_ABGR8888,
7824         DRM_FORMAT_RGB565
7825 };
7826
7827 static const u32 cursor_formats[] = {
7828         DRM_FORMAT_ARGB8888
7829 };
7830
7831 static int get_plane_formats(const struct drm_plane *plane,
7832                              const struct dc_plane_cap *plane_cap,
7833                              uint32_t *formats, int max_formats)
7834 {
7835         int i, num_formats = 0;
7836
7837         /*
7838          * TODO: Query support for each group of formats directly from
7839          * DC plane caps. This will require adding more formats to the
7840          * caps list.
7841          */
7842
7843         switch (plane->type) {
7844         case DRM_PLANE_TYPE_PRIMARY:
7845                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7846                         if (num_formats >= max_formats)
7847                                 break;
7848
7849                         formats[num_formats++] = rgb_formats[i];
7850                 }
7851
7852                 if (plane_cap && plane_cap->pixel_format_support.nv12)
7853                         formats[num_formats++] = DRM_FORMAT_NV12;
7854                 if (plane_cap && plane_cap->pixel_format_support.p010)
7855                         formats[num_formats++] = DRM_FORMAT_P010;
7856                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7857                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7858                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7859                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7860                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7861                 }
7862                 break;
7863
7864         case DRM_PLANE_TYPE_OVERLAY:
7865                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7866                         if (num_formats >= max_formats)
7867                                 break;
7868
7869                         formats[num_formats++] = overlay_formats[i];
7870                 }
7871                 break;
7872
7873         case DRM_PLANE_TYPE_CURSOR:
7874                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7875                         if (num_formats >= max_formats)
7876                                 break;
7877
7878                         formats[num_formats++] = cursor_formats[i];
7879                 }
7880                 break;
7881         }
7882
7883         return num_formats;
7884 }
7885
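/*
 * Initialize a DRM plane from DC plane caps: supported formats and
 * modifiers, alpha/blend and YUV color properties, and rotation where the
 * ASIC supports it.
 */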
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;
	uint64_t *modifiers = NULL;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
	if (res)
		return res;

	if (modifiers == NULL)
		adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true;

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       modifiers, plane->type, NULL);
	kfree(modifiers);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI) |
					  BIT(DRM_MODE_BLEND_COVERAGE);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE &&
	    plane->type != DRM_PLANE_TYPE_CURSOR)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

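/*
 * Create and initialize a DRM CRTC together with its dedicated cursor
 * plane, then register both with DRM core and hook up color management
 * and the legacy gamma LUT.
 */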
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc) {
		res = -ENOMEM;
		goto fail;
	}

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the CRTC state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}

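/* Map a DC signal type onto the DRM connector type reported to userspace. */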
static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;
	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

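/*
 * Cache the connector's native (preferred) mode in the encoder. Only the
 * head of probed_modes needs to be examined here: callers sort the list
 * with drm_mode_sort() first, which places a preferred mode, if any, at
 * the front.
 */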
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

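/*
 * Duplicate the native mode under a new name and active area, e.g. to
 * offer scaled-down "common" resolutions on a fixed panel timing.
 */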
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

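/*
 * Add a list of well-known resolutions, derived from the native mode, to
 * the connector. Modes larger than the native mode, equal to it, or
 * already present in probed_modes are skipped.
 */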
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		   (common_modes[i].w == native_mode->hdisplay &&
		    common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

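/*
 * Apply panel-orientation quirks for internal panels (eDP/LVDS), matched
 * against the native mode dimensions, so userspace can rotate the image.
 */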
static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can contain
		 * more than one preferred mode. A mode later in the
		 * probed mode list could have a higher resolution and
		 * also be flagged preferred: for example, 3840x2160 as
		 * the base EDID preferred timing and 4096x2160 as the
		 * preferred resolution in a DisplayID extension block.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

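/*
 * Synthesize fixed-refresh variants of the highest-refresh probed mode,
 * one per standard video rate that falls inside the panel's VRR window.
 * Each variant keeps the pixel clock and htotal and only stretches the
 * vertical blank, i.e. the new total is
 *
 *	vtotal = clock * 1000 * 1000 / (rate_in_mHz * htotal)
 *
 * so a VRR-capable panel can hold e.g. a steady 24 or 30 Hz cadence.
 */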
static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	uint32_t new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const uint32_t common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find mode with highest refresh rate with the same resolution
	 * as the preferred mode. Some monitors report a preferred mode
	 * with lower resolution than the highest refresh rate supported.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		uint64_t target_vtotal, target_vtotal_diff;
		uint64_t num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else
			drm_mode_destroy(aconnector->base.dev, new_mode);
	}
out:
	return new_modes_count;
}

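/*
 * Only bother adding the fixed-refresh variants when the panel reports a
 * usable VRR window; a range of more than 10 Hz between min and max
 * vertical refresh is used as the threshold here.
 */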
static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!edid)
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}

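/*
 * .get_modes hook: build the mode list from the cached EDID (plus common
 * and FreeSync modes), or fall back to 640x480 when the EDID is invalid.
 */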
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

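/*
 * Common connector setup shared by native and MST connectors: reset the
 * connector state, wire up the dc_link, configure HPD polling and attach
 * the standard DM connector properties.
 */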
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return -ENOMEM;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

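/*
 * Allocate and populate an i2c adapter that forwards transfers to the DC
 * DDC service for the given link. Note that the *res out-parameter is
 * never written here; callers only rely on the returned pointer being
 * non-NULL.
 */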
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	if (i2c->ddc_service->ddc_pin)
		i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}

/*
 * Note: this function assumes that dc_link_detect() was called for the
 * dc_link which will be represented by this aconnector.
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}

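/*
 * Every encoder may feed any CRTC, so the possible_crtcs mask is simply
 * one bit per CRTC, i.e. GENMASK(num_crtc - 1, 0), capped at the six
 * CRTCs the display hardware supports.
 */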
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
{
	switch (adev->mode_info.num_crtc) {
	case 1:
		return 0x1;
	case 2:
		return 0x3;
	case 3:
		return 0x7;
	case 4:
		return 0xf;
	case 5:
		return 0x1f;
	case 6:
	default:
		return 0x3f;
	}
}

static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	int res = drm_encoder_init(dev,
				   &aencoder->base,
				   &amdgpu_dm_encoder_funcs,
				   DRM_MODE_ENCODER_TMDS,
				   NULL);

	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	if (!res)
		aencoder->encoder_id = link_index;
	else
		aencoder->encoder_id = -1;

	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);

	return res;
}

static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_get(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
	} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
		amdgpu_irq_put(
			adev,
			&adev->vline0_irq,
			irq_type);
#endif
		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}

static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/*
	 * This reads the current state for the IRQ and forces a reapply of
	 * the setting to the hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}

static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}

#ifdef CONFIG_DRM_AMD_DC_HDCP
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re-enabled, ignore this
	 *
	 * Handles:     ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since the old state will always be 0 (UNDESIRED)
	 * and the restored state will be ENABLED
	 *
	 * Handles:     UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Stream removed and re-enabled
	 *
	 * Can sometimes overlap with the HPD case,
	 * thus set update_hdcp to false to avoid
	 * setting HDCP multiple times.
	 *
	 * Handles:     DESIRED -> DESIRED (Special case)
	 */
	if (!(old_state->crtc && old_state->crtc->enabled) &&
	    state->crtc && state->crtc->enabled &&
	    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/* Hot-plug, headless S3, DPMS
	 *
	 * Only start HDCP if the display is connected/enabled.
	 * update_hdcp flag will be set to false until the next
	 * HPD comes in.
	 *
	 * Handles:     DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:     UNDESIRED -> UNDESIRED
	 *              DESIRED -> DESIRED
	 *              ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:     UNDESIRED -> DESIRED
	 *              DESIRED -> UNDESIRED
	 *              ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:     DESIRED -> ENABLED
	 */
	return false;
}
#endif

static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */

	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}

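/*
 * Translate the cursor plane state into a DC cursor position. Negative
 * plane coordinates are clamped to 0 and expressed through the hotspot
 * instead, so the cursor can slide partially off the top/left edge.
 */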
static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
			       struct dc_cursor_position *position)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int x, y;
	int xorigin = 0, yorigin = 0;

	if (!crtc || !plane->state->fb)
		return 0;

	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
			  __func__,
			  plane->state->crtc_w,
			  plane->state->crtc_h);
		return -EINVAL;
	}

	x = plane->state->crtc_x;
	y = plane->state->crtc_y;

	if (x <= -amdgpu_crtc->max_cursor_width ||
	    y <= -amdgpu_crtc->max_cursor_height)
		return 0;

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}
	position->enable = true;
	position->translate_by_source = true;
	position->x = x;
	position->y = y;
	position->x_hotspot = xorigin;
	position->y_hotspot = yorigin;

	return 0;
}

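/*
 * Program the DC cursor attributes and position for a cursor plane
 * update; a disabled or fully off-screen cursor is handled by writing a
 * position with enable = false.
 */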
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position = {0};
	struct dc_cursor_attributes attributes;
	int ret;

	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DC_LOG_CURSOR("%s: crtc_id=%d with size %dx%d\n",
		      __func__,
		      amdgpu_crtc->crtc_id,
		      plane->state->crtc_w,
		      plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}

static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{
	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
		     acrtc->crtc_id);
}

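/*
 * Recompute the VRR parameters and the VRR infopacket for a stream on
 * flip, push them to DC, and note in the CRTC state whether the timing
 * or the infopacket actually changed.
 */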
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;
	bool pack_sdp_v1_3 = false;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket,
		pack_sdp_v1_3);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void update_stream_irq_parameters(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state)
{
	struct dc_stream_state *new_stream = new_crtc_state->stream;
	struct mod_vrr_params vrr_params;
	struct mod_freesync_config config = new_crtc_state->freesync_config;
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */
	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (new_crtc_state->vrr_supported &&
	    config.min_refresh_in_uhz &&
	    config.max_refresh_in_uhz) {
		/*
		 * If a FreeSync-compatible mode was set, config.state has
		 * already been set during atomic check.
		 */
		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
		} else {
			config.state = new_crtc_state->base.vrr_enabled ?
						     VRR_STATE_ACTIVE_VARIABLE :
						     VRR_STATE_INACTIVE;
		}
	} else {
		config.state = VRR_STATE_UNSUPPORTED;
	}

	mod_freesync_build_vrr_params(dm->freesync_module,
				      new_stream,
				      &config, &vrr_params);

	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_config = config;
	/* Copy state for access from DM IRQ handler */
	acrtc->dm_irq_params.freesync_config = config;
	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
	acrtc->dm_irq_params.vrr_params = vrr_params;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
					    struct dm_crtc_state *new_state)
{
	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);

	if (!old_vrr_active && new_vrr_active) {
		/* Transition VRR inactive -> active:
		 * While VRR is active, we must not disable the vblank irq, as
		 * a reenable after disable would compute bogus vblank/pflip
		 * timestamps if the reenable happened inside the display
		 * front porch.
		 *
		 * We also need the vupdate irq for the actual core vblank
		 * handling at the end of vblank.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, true);
		drm_crtc_vblank_get(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	} else if (old_vrr_active && !new_vrr_active) {
		/* Transition VRR active -> inactive:
		 * Allow vblank irq disable again for fixed refresh rate.
		 */
		dm_set_vupdate_irq(new_state->base.crtc, false);
		drm_crtc_vblank_put(new_state->base.crtc);
		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
				 __func__, new_state->base.crtc->base.id);
	}
}

static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	int i;

	/*
	 * TODO: Make this per-stream so we don't issue redundant updates for
	 * commits with multiple streams.
	 */
	for_each_old_plane_in_state(state, plane, old_plane_state, i)
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			handle_cursor_update(plane, old_plane_state);
}

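/*
 * Program all plane updates for one CRTC as a single DC update bundle:
 * gather scaling/plane info and flip addresses for every plane on the
 * CRTC, wait for the framebuffer fences, throttle against the target
 * vblank and then hand the bundle to DC.
 */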
9114 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9115                                     struct dc_state *dc_state,
9116                                     struct drm_device *dev,
9117                                     struct amdgpu_display_manager *dm,
9118                                     struct drm_crtc *pcrtc,
9119                                     bool wait_for_vblank)
9120 {
9121         uint32_t i;
9122         uint64_t timestamp_ns;
9123         struct drm_plane *plane;
9124         struct drm_plane_state *old_plane_state, *new_plane_state;
9125         struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9126         struct drm_crtc_state *new_pcrtc_state =
9127                         drm_atomic_get_new_crtc_state(state, pcrtc);
9128         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9129         struct dm_crtc_state *dm_old_crtc_state =
9130                         to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9131         int planes_count = 0, vpos, hpos;
9132         long r;
9133         unsigned long flags;
9134         struct amdgpu_bo *abo;
9135         uint32_t target_vblank, last_flip_vblank;
9136         bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9137         bool pflip_present = false;
9138         struct {
9139                 struct dc_surface_update surface_updates[MAX_SURFACES];
9140                 struct dc_plane_info plane_infos[MAX_SURFACES];
9141                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9142                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9143                 struct dc_stream_update stream_update;
9144         } *bundle;
9145
9146         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9147
9148         if (!bundle) {
9149                 dm_error("Failed to allocate update bundle\n");
9150                 goto cleanup;
9151         }
9152
9153         /*
9154          * Disable the cursor first if we're disabling all the planes.
9155          * It'll remain on the screen after the planes are re-enabled
9156          * if we don't.
9157          */
9158         if (acrtc_state->active_planes == 0)
9159                 amdgpu_dm_commit_cursors(state);
9160
9161         /* update planes when needed */
9162         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9163                 struct drm_crtc *crtc = new_plane_state->crtc;
9164                 struct drm_crtc_state *new_crtc_state;
9165                 struct drm_framebuffer *fb = new_plane_state->fb;
9166                 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9167                 bool plane_needs_flip;
9168                 struct dc_plane_state *dc_plane;
9169                 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9170
9171                 /* Cursor plane is handled after stream updates */
9172                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9173                         continue;
9174
9175                 if (!fb || !crtc || pcrtc != crtc)
9176                         continue;
9177
9178                 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9179                 if (!new_crtc_state->active)
9180                         continue;
9181
9182                 dc_plane = dm_new_plane_state->dc_state;
9183
9184                 bundle->surface_updates[planes_count].surface = dc_plane;
9185                 if (new_pcrtc_state->color_mgmt_changed) {
9186                         bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9187                         bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9188                         bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9189                 }
9190
9191                 fill_dc_scaling_info(dm->adev, new_plane_state,
9192                                      &bundle->scaling_infos[planes_count]);
9193
9194                 bundle->surface_updates[planes_count].scaling_info =
9195                         &bundle->scaling_infos[planes_count];
9196
9197                 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9198
9199                 pflip_present = pflip_present || plane_needs_flip;
9200
9201                 if (!plane_needs_flip) {
9202                         planes_count += 1;
9203                         continue;
9204                 }
9205
9206                 abo = gem_to_amdgpu_bo(fb->obj[0]);
9207
9208                 /*
9209                  * Wait for all fences on this FB. Do limited wait to avoid
9210                  * deadlock during GPU reset when this fence will not signal
9211                  * but we hold reservation lock for the BO.
9212                  */
9213                 r = dma_resv_wait_timeout(abo->tbo.base.resv,
9214                                           DMA_RESV_USAGE_WRITE, false,
9215                                           msecs_to_jiffies(5000));
9216                 if (unlikely(r <= 0))
9217                         DRM_ERROR("Waiting for fences timed out!");
9218
9219                 fill_dc_plane_info_and_addr(
9220                         dm->adev, new_plane_state,
9221                         afb->tiling_flags,
9222                         &bundle->plane_infos[planes_count],
9223                         &bundle->flip_addrs[planes_count].address,
9224                         afb->tmz_surface, false);
9225
9226                 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
9227                                  new_plane_state->plane->index,
9228                                  bundle->plane_infos[planes_count].dcc.enable);
9229
9230                 bundle->surface_updates[planes_count].plane_info =
9231                         &bundle->plane_infos[planes_count];
9232
9233                 /*
9234                  * Only allow immediate flips for fast updates that don't
9235                  * change FB pitch, DCC state, rotation or mirroing.
9236                  */
9237                 bundle->flip_addrs[planes_count].flip_immediate =
9238                         crtc->state->async_flip &&
9239                         acrtc_state->update_type == UPDATE_TYPE_FAST;
9240
9241                 timestamp_ns = ktime_get_ns();
9242                 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9243                 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9244                 bundle->surface_updates[planes_count].surface = dc_plane;
9245
9246                 if (!bundle->surface_updates[planes_count].surface) {
9247                         DRM_ERROR("No surface for CRTC: id=%d\n",
9248                                         acrtc_attach->crtc_id);
9249                         continue;
9250                 }
9251
9252                 if (plane == pcrtc->primary)
9253                         update_freesync_state_on_stream(
9254                                 dm,
9255                                 acrtc_state,
9256                                 acrtc_state->stream,
9257                                 dc_plane,
9258                                 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9259
9260                 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
9261                                  __func__,
9262                                  bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9263                                  bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9264
9265                 planes_count += 1;
9266
9267         }
9268
9269         if (pflip_present) {
9270                 if (!vrr_active) {
9271                         /* Use old throttling in non-vrr fixed refresh rate mode
9272                          * to keep flip scheduling based on target vblank counts
9273                          * working in a backwards compatible way, e.g., for
9274                          * clients using the GLX_OML_sync_control extension or
9275                          * DRI3/Present extension with defined target_msc.
9276                          */
9277                         last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9278                 } else {
9280                         /* For variable refresh rate mode only:
9281                          * Get vblank of last completed flip to avoid > 1 vrr
9282                          * flips per video frame by use of throttling, but allow
9283                          * flip programming anywhere in the possibly large
9284                          * variable vrr vblank interval for fine-grained flip
9285                          * timing control and more opportunity to avoid stutter
9286                          * on late submission of flips.
9287                          */
9288                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9289                         last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9290                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9291                 }
9292
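                     /*
                      * wait_for_vblank is a bool used as 0 or 1 here: with async
                      * flips the target is last_flip_vblank itself, otherwise we
                      * wait one further vblank.
                      */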
9293                 target_vblank = last_flip_vblank + wait_for_vblank;
9294
9295                 /*
9296                  * Wait until we're out of the vertical blank period before the one
9297                  * targeted by the flip
9298                  */
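                     /*
                      * The signed cast below keeps the comparison safe across
                      * vblank counter wraparound: e.g. target_vblank == 2 with a
                      * counter of 0xfffffffe gives (int)(2 - 0xfffffffe) == 4 > 0,
                      * so we keep polling in ~1 ms steps until the counter
                      * catches up.
                      */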
9299                 while ((acrtc_attach->enabled &&
9300                         (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9301                                                             0, &vpos, &hpos, NULL,
9302                                                             NULL, &pcrtc->hwmode)
9303                          & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9304                         (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9305                         (int)(target_vblank -
9306                           amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9307                         usleep_range(1000, 1100);
9308                 }
9309
9310                 /*
9311                  * Prepare the flip event for the pageflip interrupt to handle.
9312                  *
9313                  * This only works in the case where we've already turned on the
9314                  * appropriate hardware blocks (e.g. HUBP) so in the transition case
9315                  * from 0 -> n planes we have to skip a hardware generated event
9316                  * and rely on sending it from software.
9317                  */
9318                 if (acrtc_attach->base.state->event &&
9319                     acrtc_state->active_planes > 0 &&
9320                     !acrtc_state->force_dpms_off) {
9321                         drm_crtc_vblank_get(pcrtc);
9322
9323                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9324
9325                         WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9326                         prepare_flip_isr(acrtc_attach);
9327
9328                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9329                 }
9330
9331                 if (acrtc_state->stream) {
9332                         if (acrtc_state->freesync_vrr_info_changed)
9333                                 bundle->stream_update.vrr_infopacket =
9334                                         &acrtc_state->stream->vrr_infopacket;
9335                 }
9336         }
9337
9338         /* Update the planes if changed or disable if we don't have any. */
9339         if ((planes_count || acrtc_state->active_planes == 0) &&
9340                 acrtc_state->stream) {
9341                 /*
9342                  * If PSR or idle optimizations are enabled then flush out
9343                  * any pending work before hardware programming.
9344                  */
9345                 if (dm->vblank_control_workqueue)
9346                         flush_workqueue(dm->vblank_control_workqueue);
9347
9348                 bundle->stream_update.stream = acrtc_state->stream;
9349                 if (new_pcrtc_state->mode_changed) {
9350                         bundle->stream_update.src = acrtc_state->stream->src;
9351                         bundle->stream_update.dst = acrtc_state->stream->dst;
9352                 }
9353
9354                 if (new_pcrtc_state->color_mgmt_changed) {
9355                         /*
9356                          * TODO: This isn't fully correct since we've actually
9357                          * already modified the stream in place.
9358                          */
9359                         bundle->stream_update.gamut_remap =
9360                                 &acrtc_state->stream->gamut_remap_matrix;
9361                         bundle->stream_update.output_csc_transform =
9362                                 &acrtc_state->stream->csc_color_matrix;
9363                         bundle->stream_update.out_transfer_func =
9364                                 acrtc_state->stream->out_transfer_func;
9365                 }
9366
9367                 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9368                 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9369                         bundle->stream_update.abm_level = &acrtc_state->abm_level;
9370
9371                 /*
9372                  * If FreeSync state on the stream has changed then we need to
9373                  * re-adjust the min/max bounds now that DC doesn't handle this
9374                  * as part of commit.
9375                  */
9376                 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9377                         spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9378                         dc_stream_adjust_vmin_vmax(
9379                                 dm->dc, acrtc_state->stream,
9380                                 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9381                         spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9382                 }
9383                 mutex_lock(&dm->dc_lock);
9384                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9385                                 acrtc_state->stream->link->psr_settings.psr_allow_active)
9386                         amdgpu_dm_psr_disable(acrtc_state->stream);
9387
9388                 dc_commit_updates_for_stream(dm->dc,
9389                                                      bundle->surface_updates,
9390                                                      planes_count,
9391                                                      acrtc_state->stream,
9392                                                      &bundle->stream_update,
9393                                                      dc_state);
9394
9395                 /*
9396                  * Enable or disable the interrupts on the backend.
9397                  *
9398                  * Most pipes are put into power gating when unused.
9399                  *
9400                  * When a pipe is power gated we lose its interrupt enablement
9401                  * state; it has to be reprogrammed when power gating is disabled.
9402                  *
9403                  * So we need to update the IRQ control state in hardware
9404                  * whenever the pipe turns on (since it could be previously
9405                  * power gated) or off (since some pipes can't be power gated
9406                  * on some ASICs).
9407                  */
9408                 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9409                         dm_update_pflip_irq_state(drm_to_adev(dev),
9410                                                   acrtc_attach);
9411
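                     /*
                      * If the link supports PSR but the feature has not been
                      * enabled yet, set it up now; this is only done for
                      * non-fast (full) updates.
                      */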
9412                 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9413                                 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9414                                 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9415                         amdgpu_dm_link_setup_psr(acrtc_state->stream);
9416
9417                 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9418                 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9419                     acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9420                         struct amdgpu_dm_connector *aconn =
9421                                 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9422
9423                         if (aconn->psr_skip_count > 0)
9424                                 aconn->psr_skip_count--;
9425
9426                         /* Allow PSR when skip count is 0. */
9427                         acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9428                 } else {
9429                         acrtc_attach->dm_irq_params.allow_psr_entry = false;
9430                 }
9431
9432                 mutex_unlock(&dm->dc_lock);
9433         }
9434
9435         /*
9436          * Update cursor state *after* programming all the planes.
9437          * This avoids redundant programming in the case where we're
9438          * disabling planes, since those pipes are being shut off anyway.
9439          */
9440         if (acrtc_state->active_planes)
9441                 amdgpu_dm_commit_cursors(state);
9442
9443 cleanup:
9444         kfree(bundle);
9445 }
9446
9447 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9448                                    struct drm_atomic_state *state)
9449 {
9450         struct amdgpu_device *adev = drm_to_adev(dev);
9451         struct amdgpu_dm_connector *aconnector;
9452         struct drm_connector *connector;
9453         struct drm_connector_state *old_con_state, *new_con_state;
9454         struct drm_crtc_state *new_crtc_state;
9455         struct dm_crtc_state *new_dm_crtc_state;
9456         const struct dc_stream_status *status;
9457         int i, inst;
9458
9459         /* Notify audio device removals. */
9460         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9461                 if (old_con_state->crtc != new_con_state->crtc) {
9462                         /* CRTC changes require notification. */
9463                         goto notify;
9464                 }
9465
9466                 if (!new_con_state->crtc)
9467                         continue;
9468
9469                 new_crtc_state = drm_atomic_get_new_crtc_state(
9470                         state, new_con_state->crtc);
9471
9472                 if (!new_crtc_state)
9473                         continue;
9474
9475                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9476                         continue;
9477
9478         notify:
9479                 aconnector = to_amdgpu_dm_connector(connector);
9480
9481                 mutex_lock(&adev->dm.audio_lock);
9482                 inst = aconnector->audio_inst;
9483                 aconnector->audio_inst = -1;
9484                 mutex_unlock(&adev->dm.audio_lock);
9485
9486                 amdgpu_dm_audio_eld_notify(adev, inst);
9487         }
9488
9489         /* Notify audio device additions. */
9490         for_each_new_connector_in_state(state, connector, new_con_state, i) {
9491                 if (!new_con_state->crtc)
9492                         continue;
9493
9494                 new_crtc_state = drm_atomic_get_new_crtc_state(
9495                         state, new_con_state->crtc);
9496
9497                 if (!new_crtc_state)
9498                         continue;
9499
9500                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9501                         continue;
9502
9503                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9504                 if (!new_dm_crtc_state->stream)
9505                         continue;
9506
9507                 status = dc_stream_get_status(new_dm_crtc_state->stream);
9508                 if (!status)
9509                         continue;
9510
9511                 aconnector = to_amdgpu_dm_connector(connector);
9512
9513                 mutex_lock(&adev->dm.audio_lock);
9514                 inst = status->audio_inst;
9515                 aconnector->audio_inst = inst;
9516                 mutex_unlock(&adev->dm.audio_lock);
9517
9518                 amdgpu_dm_audio_eld_notify(adev, inst);
9519         }
9520 }
9521
9522 /*
9523  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9524  * @crtc_state: the DRM CRTC state
9525  * @stream_state: the DC stream state.
9526  *
9527  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9528  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9529  */
9530 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9531                                                 struct dc_stream_state *stream_state)
9532 {
9533         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9534 }
9535
9536 /**
9537  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9538  * @state: The atomic state to commit
9539  *
9540  * This will tell DC to commit the constructed DC state from atomic_check,
9541  * programming the hardware. Any failure here implies a hardware problem, since
9542  * atomic check should have filtered out anything non-kosher.
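 *
 * The high-level flow is: tear down interrupts and streams for CRTCs that
 * are disabled or undergoing a modeset, commit the new dc_state, update
 * HDCP and connector-level stream properties, program per-CRTC plane
 * flips, notify audio, send the pending vblank/flip events, and finally
 * drop runtime PM references for newly disabled CRTCs.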
9543  */
9544 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9545 {
9546         struct drm_device *dev = state->dev;
9547         struct amdgpu_device *adev = drm_to_adev(dev);
9548         struct amdgpu_display_manager *dm = &adev->dm;
9549         struct dm_atomic_state *dm_state;
9550         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9551         uint32_t i, j;
9552         struct drm_crtc *crtc;
9553         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9554         unsigned long flags;
9555         bool wait_for_vblank = true;
9556         struct drm_connector *connector;
9557         struct drm_connector_state *old_con_state, *new_con_state;
9558         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9559         int crtc_disable_count = 0;
9560         bool mode_set_reset_required = false;
9561
9562         trace_amdgpu_dm_atomic_commit_tail_begin(state);
9563
9564         drm_atomic_helper_update_legacy_modeset_state(dev, state);
9565
9566         dm_state = dm_atomic_get_new_state(state);
9567         if (dm_state && dm_state->context) {
9568                 dc_state = dm_state->context;
9569         } else {
9570                 /* No state changes, retain current state. */
9571                 dc_state_temp = dc_create_state(dm->dc);
9572                 ASSERT(dc_state_temp);
9573                 dc_state = dc_state_temp;
9574                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9575         }
9576
9577         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9578                                        new_crtc_state, i) {
9579                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9580
9581                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9582
9583                 if (old_crtc_state->active &&
9584                     (!new_crtc_state->active ||
9585                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9586                         manage_dm_interrupts(adev, acrtc, false);
9587                         dc_stream_release(dm_old_crtc_state->stream);
9588                 }
9589         }
9590
9591         drm_atomic_helper_calc_timestamping_constants(state);
9592
9593         /* update changed items */
9594         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9595                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9596
9597                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9598                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9599
9600                 drm_dbg_state(state->dev,
9601                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9602                         "planes_changed:%d, mode_changed:%d, active_changed:%d, "
9603                         "connectors_changed:%d\n",
9604                         acrtc->crtc_id,
9605                         new_crtc_state->enable,
9606                         new_crtc_state->active,
9607                         new_crtc_state->planes_changed,
9608                         new_crtc_state->mode_changed,
9609                         new_crtc_state->active_changed,
9610                         new_crtc_state->connectors_changed);
9611
9612                 /* Disable cursor if disabling crtc */
9613                 if (old_crtc_state->active && !new_crtc_state->active) {
9614                         struct dc_cursor_position position;
9615
9616                         memset(&position, 0, sizeof(position));
9617                         mutex_lock(&dm->dc_lock);
9618                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9619                         mutex_unlock(&dm->dc_lock);
9620                 }
9621
9622                 /* Copy all transient state flags into dc state */
9623                 if (dm_new_crtc_state->stream) {
9624                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9625                                                             dm_new_crtc_state->stream);
9626                 }
9627
9628                 /* handles headless hotplug case, updating new_state and
9629                  * aconnector as needed
9630                  */
9631
9632                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9633
9634                         DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9635
9636                         if (!dm_new_crtc_state->stream) {
9637                                 /*
9638                                  * This could happen because of issues with
9639                                  * userspace notification delivery.
9640                                  * In this case userspace tries to set a mode on
9641                                  * a display which is in fact disconnected;
9642                                  * dc_sink is then NULL on the aconnector.
9643                                  * We expect a mode reset to come soon.
9644                                  *
9645                                  * This can also happen when an unplug occurs
9646                                  * while the resume sequence is still in progress.
9647                                  *
9648                                  * In either case, we want to pretend we still
9649                                  * have a sink to keep the pipe running so that
9650                                  * hw state stays consistent with the sw state.
9651                                  */
9652                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9653                                                 __func__, acrtc->base.base.id);
9654                                 continue;
9655                         }
9656
9657                         if (dm_old_crtc_state->stream)
9658                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9659
9660                         pm_runtime_get_noresume(dev->dev);
9661
9662                         acrtc->enabled = true;
9663                         acrtc->hw_mode = new_crtc_state->mode;
9664                         crtc->hwmode = new_crtc_state->mode;
9665                         mode_set_reset_required = true;
9666                 } else if (modereset_required(new_crtc_state)) {
9667                         DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9668                         /* i.e. reset mode */
9669                         if (dm_old_crtc_state->stream)
9670                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9671
9672                         mode_set_reset_required = true;
9673                 }
9674         } /* for_each_crtc_in_state() */
9675
9676         if (dc_state) {
9677                 /* if there is a mode set or reset, disable eDP PSR */
9678                 if (mode_set_reset_required) {
9679                         if (dm->vblank_control_workqueue)
9680                                 flush_workqueue(dm->vblank_control_workqueue);
9681
9682                         amdgpu_dm_psr_disable_all(dm);
9683                 }
9684
9685                 dm_enable_per_frame_crtc_master_sync(dc_state);
9686                 mutex_lock(&dm->dc_lock);
9687                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9688
9689                 /* Allow idle optimization when vblank count is 0 for display off */
9690                 if (dm->active_vblank_irq_count == 0)
9691                         dc_allow_idle_optimizations(dm->dc, true);
9692                 mutex_unlock(&dm->dc_lock);
9693         }
9694
9695         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9696                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9697
9698                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9699
9700                 if (dm_new_crtc_state->stream != NULL) {
9701                         const struct dc_stream_status *status =
9702                                         dc_stream_get_status(dm_new_crtc_state->stream);
9703
9704                         if (!status)
9705                                 status = dc_stream_get_status_from_state(dc_state,
9706                                                                          dm_new_crtc_state->stream);
9707                         if (!status)
9708                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9709                         else
9710                                 acrtc->otg_inst = status->primary_otg_inst;
9711                 }
9712         }
9713 #ifdef CONFIG_DRM_AMD_DC_HDCP
9714         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9715                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9716                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9717                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9718
9719                 new_crtc_state = NULL;
9720
9721                 if (acrtc)
9722                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9723
9724                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9725
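                     /*
                      * If the stream is going away while content protection is
                      * still enabled, reset HDCP on the link and downgrade the
                      * property to DESIRED so that it gets renegotiated when a
                      * stream comes back.
                      */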
9726                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9727                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9728                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9729                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9730                         dm_new_con_state->update_hdcp = true;
9731                         continue;
9732                 }
9733
9734                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9735                         hdcp_update_display(
9736                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9737                                 new_con_state->hdcp_content_type,
9738                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9739         }
9740 #endif
9741
9742         /* Handle connector state changes */
9743         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9744                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9745                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9746                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9747                 struct dc_surface_update dummy_updates[MAX_SURFACES];
9748                 struct dc_stream_update stream_update;
9749                 struct dc_info_packet hdr_packet;
9750                 struct dc_stream_status *status = NULL;
9751                 bool abm_changed, hdr_changed, scaling_changed;
9752
9753                 memset(&dummy_updates, 0, sizeof(dummy_updates));
9754                 memset(&stream_update, 0, sizeof(stream_update));
9755
9756                 if (acrtc) {
9757                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9758                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9759                 }
9760
9761                 /* Skip any modesets/resets */
9762                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9763                         continue;
9764
9765                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9766                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9767
9768                 scaling_changed = is_scaling_state_different(dm_new_con_state,
9769                                                              dm_old_con_state);
9770
9771                 abm_changed = dm_new_crtc_state->abm_level !=
9772                               dm_old_crtc_state->abm_level;
9773
9774                 hdr_changed =
9775                         !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9776
9777                 if (!scaling_changed && !abm_changed && !hdr_changed)
9778                         continue;
9779
9780                 stream_update.stream = dm_new_crtc_state->stream;
9781                 if (scaling_changed) {
9782                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9783                                         dm_new_con_state, dm_new_crtc_state->stream);
9784
9785                         stream_update.src = dm_new_crtc_state->stream->src;
9786                         stream_update.dst = dm_new_crtc_state->stream->dst;
9787                 }
9788
9789                 if (abm_changed) {
9790                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9791
9792                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
9793                 }
9794
9795                 if (hdr_changed) {
9796                         fill_hdr_info_packet(new_con_state, &hdr_packet);
9797                         stream_update.hdr_static_metadata = &hdr_packet;
9798                 }
9799
9800                 status = dc_stream_get_status(dm_new_crtc_state->stream);
9801
9802                 if (WARN_ON(!status))
9803                         continue;
9804
9805                 WARN_ON(!status->plane_count);
9806
9807                 /*
9808                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
9809                  * Here we create an empty update on each plane.
9810                  * To fix this, DC should permit updating only stream properties.
9811                  */
9812                 for (j = 0; j < status->plane_count; j++)
9813                         dummy_updates[j].surface = status->plane_states[0];
9814
9816                 mutex_lock(&dm->dc_lock);
9817                 dc_commit_updates_for_stream(dm->dc,
9818                                                      dummy_updates,
9819                                                      status->plane_count,
9820                                                      dm_new_crtc_state->stream,
9821                                                      &stream_update,
9822                                                      dc_state);
9823                 mutex_unlock(&dm->dc_lock);
9824         }
9825
9826         /* Count number of newly disabled CRTCs for dropping PM refs later. */
9827         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9828                                       new_crtc_state, i) {
9829                 if (old_crtc_state->active && !new_crtc_state->active)
9830                         crtc_disable_count++;
9831
9832                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9833                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9834
9835                 /* For freesync config update on crtc state and params for irq */
9836                 update_stream_irq_parameters(dm, dm_new_crtc_state);
9837
9838                 /* Handle vrr on->off / off->on transitions */
9839                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9840                                                 dm_new_crtc_state);
9841         }
9842
9843         /*
9844          * Enable interrupts for CRTCs that are newly enabled or went through
9845          * a modeset. This is intentionally deferred until after the front-end
9846          * state has been modified so that the OTG is already on and the IRQ
9847          * handlers do not access stale or invalid state.
9848          */
9849         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9850                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9851 #ifdef CONFIG_DEBUG_FS
9852                 bool configure_crc = false;
9853                 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9854 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9855                 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9856 #endif
9857                 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9858                 cur_crc_src = acrtc->dm_irq_params.crc_src;
9859                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9860 #endif
9861                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9862
9863                 if (new_crtc_state->active &&
9864                     (!old_crtc_state->active ||
9865                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9866                         dc_stream_retain(dm_new_crtc_state->stream);
9867                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9868                         manage_dm_interrupts(adev, acrtc, true);
9869
9870 #ifdef CONFIG_DEBUG_FS
9871                         /*
9872                          * Frontend may have changed so reapply the CRC capture
9873                          * settings for the stream.
9874                          */
9875                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9876
9877                         if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9878                                 configure_crc = true;
9879 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9880                                 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9881                                         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9882                                         acrtc->dm_irq_params.crc_window.update_win = true;
9883                                         acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9884                                         spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9885                                         crc_rd_wrk->crtc = crtc;
9886                                         spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9887                                         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9888                                 }
9889 #endif
9890                         }
9891
9892                         if (configure_crc)
9893                                 if (amdgpu_dm_crtc_configure_crc_source(
9894                                         crtc, dm_new_crtc_state, cur_crc_src))
9895                                         DRM_DEBUG_DRIVER("Failed to configure crc source\n");
9896 #endif
9897                 }
9898         }
9899
9900         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9901                 if (new_crtc_state->async_flip)
9902                         wait_for_vblank = false;
9903
9904         /* Update planes when needed per CRTC */
9905         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9906                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9907
9908                 if (dm_new_crtc_state->stream)
9909                         amdgpu_dm_commit_planes(state, dc_state, dev,
9910                                                 dm, crtc, wait_for_vblank);
9911         }
9912
9913         /* Update audio instances for each connector. */
9914         amdgpu_dm_commit_audio(dev, state);
9915
9916 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||           \
9917         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9918         /* restore the backlight level */
9919         for (i = 0; i < dm->num_of_edps; i++) {
9920                 if (dm->backlight_dev[i] &&
9921                     (dm->actual_brightness[i] != dm->brightness[i]))
9922                         amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9923         }
9924 #endif
9925         /*
9926          * Send a vblank event for every event not handled in the flip path,
9927          * and mark the consumed events for drm_atomic_helper_commit_hw_done().
9928          */
9929         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9930         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9931
9932                 if (new_crtc_state->event)
9933                         drm_send_event_locked(dev, &new_crtc_state->event->base);
9934
9935                 new_crtc_state->event = NULL;
9936         }
9937         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9938
9939         /* Signal HW programming completion */
9940         drm_atomic_helper_commit_hw_done(state);
9941
9942         if (wait_for_vblank)
9943                 drm_atomic_helper_wait_for_flip_done(dev, state);
9944
9945         drm_atomic_helper_cleanup_planes(dev, state);
9946
9947         /* return the stolen vga memory back to VRAM */
9948         if (!adev->mman.keep_stolen_vga_memory)
9949                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9950         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9951
9952         /*
9953          * Finally, drop a runtime PM reference for each newly disabled CRTC,
9954          * so we can put the GPU into runtime suspend if we're not driving any
9955          * displays anymore
9956          */
9957         for (i = 0; i < crtc_disable_count; i++)
9958                 pm_runtime_put_autosuspend(dev->dev);
9959         pm_runtime_mark_last_busy(dev->dev);
9960
9961         if (dc_state_temp)
9962                 dc_release_state(dc_state_temp);
9963 }
9964
9965
9966 static int dm_force_atomic_commit(struct drm_connector *connector)
9967 {
9968         int ret = 0;
9969         struct drm_device *ddev = connector->dev;
9970         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9971         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9972         struct drm_plane *plane = disconnected_acrtc->base.primary;
9973         struct drm_connector_state *conn_state;
9974         struct drm_crtc_state *crtc_state;
9975         struct drm_plane_state *plane_state;
9976
9977         if (!state)
9978                 return -ENOMEM;
9979
9980         state->acquire_ctx = ddev->mode_config.acquire_ctx;
9981
9982         /* Construct an atomic state to restore previous display setting */
9983
9984         /*
9985          * Attach connectors to drm_atomic_state
9986          */
9987         conn_state = drm_atomic_get_connector_state(state, connector);
9988
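             /*
              * drm_atomic_get_connector_state() returns an ERR_PTR() on failure;
              * PTR_ERR_OR_ZERO() folds that into 0 on success or a negative errno.
              */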
9989         ret = PTR_ERR_OR_ZERO(conn_state);
9990         if (ret)
9991                 goto out;
9992
9993         /* Attach CRTC to drm_atomic_state */
9994         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9995
9996         ret = PTR_ERR_OR_ZERO(crtc_state);
9997         if (ret)
9998                 goto out;
9999
10000         /* force a restore */
10001         crtc_state->mode_changed = true;
10002
10003         /* Attach plane to drm_atomic_state */
10004         plane_state = drm_atomic_get_plane_state(state, plane);
10005
10006         ret = PTR_ERR_OR_ZERO(plane_state);
10007         if (ret)
10008                 goto out;
10009
10010         /* Call commit internally with the state we just constructed */
10011         ret = drm_atomic_commit(state);
10012
10013 out:
10014         drm_atomic_state_put(state);
10015         if (ret)
10016                 DRM_ERROR("Restoring old state failed with %i\n", ret);
10017
10018         return ret;
10019 }
10020
10021 /*
10022  * This function handles all cases where a set-mode call does not arrive
10023  * after a hotplug event: when a display is unplugged and then plugged back
10024  * into the same port, or when running without usermode desktop manager support.
10025  */
10026 void dm_restore_drm_connector_state(struct drm_device *dev,
10027                                     struct drm_connector *connector)
10028 {
10029         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
10030         struct amdgpu_crtc *disconnected_acrtc;
10031         struct dm_crtc_state *acrtc_state;
10032
10033         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
10034                 return;
10035
10036         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
10037         if (!disconnected_acrtc)
10038                 return;
10039
10040         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
10041         if (!acrtc_state->stream)
10042                 return;
10043
10044         /*
10045          * If the previous sink has not been released and differs from the
10046          * current one, we deduce that we cannot rely on a usermode call to
10047          * turn the display on, so we do it here.
10048          */
10049         if (acrtc_state->stream->sink != aconnector->dc_sink)
10050                 dm_force_atomic_commit(&aconnector->base);
10051 }
10052
10053 /*
10054  * Grabs all modesetting locks to serialize against any blocking commits,
10055  * and waits for completion of all non-blocking commits.
10056  */
10057 static int do_aquire_global_lock(struct drm_device *dev,
10058                                  struct drm_atomic_state *state)
10059 {
10060         struct drm_crtc *crtc;
10061         struct drm_crtc_commit *commit;
10062         long ret;
10063
10064         /*
10065          * Adding all modeset locks to acquire_ctx will
10066          * ensure that when the framework releases it, the
10067          * extra locks we are taking here will also get released.
10068          */
10069         ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10070         if (ret)
10071                 return ret;
10072
10073         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10074                 spin_lock(&crtc->commit_lock);
10075                 commit = list_first_entry_or_null(&crtc->commit_list,
10076                                 struct drm_crtc_commit, commit_entry);
10077                 if (commit)
10078                         drm_crtc_commit_get(commit);
10079                 spin_unlock(&crtc->commit_lock);
10080
10081                 if (!commit)
10082                         continue;
10083
10084                 /*
10085                  * Make sure all pending HW programming has completed and
10086                  * all page flips are done.
10087                  */
10088                 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10089
10090                 if (ret > 0)
10091                         ret = wait_for_completion_interruptible_timeout(
10092                                         &commit->flip_done, 10*HZ);
10093
10094                 if (ret == 0)
10095                         DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10096                                   "timed out\n", crtc->base.id, crtc->name);
10097
10098                 drm_crtc_commit_put(commit);
10099         }
10100
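             /*
              * ret > 0 means both waits completed; ret == 0 (timeout) was logged
              * above but is not treated as fatal, so only a negative errno (e.g.
              * -ERESTARTSYS from an interrupted wait) is propagated.
              */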
10101         return ret < 0 ? ret : 0;
10102 }
10103
10104 static void get_freesync_config_for_crtc(
10105         struct dm_crtc_state *new_crtc_state,
10106         struct dm_connector_state *new_con_state)
10107 {
10108         struct mod_freesync_config config = {0};
10109         struct amdgpu_dm_connector *aconnector =
10110                         to_amdgpu_dm_connector(new_con_state->base.connector);
10111         struct drm_display_mode *mode = &new_crtc_state->base.mode;
10112         int vrefresh = drm_mode_vrefresh(mode);
10113         bool fs_vid_mode = false;
10114
10115         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10116                                         vrefresh >= aconnector->min_vfreq &&
10117                                         vrefresh <= aconnector->max_vfreq;
10118
10119         if (new_crtc_state->vrr_supported) {
10120                 new_crtc_state->stream->ignore_msa_timing_param = true;
10121                 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10122
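                     /*
                      * min/max_vfreq are whole Hz from the display's FreeSync
                      * range; mod_freesync expects µHz, e.g. 48 Hz -> 48000000.
                      */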
10123                 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10124                 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10125                 config.vsif_supported = true;
10126                 config.btr = true;
10127
10128                 if (fs_vid_mode) {
10129                         config.state = VRR_STATE_ACTIVE_FIXED;
10130                         config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10131                         goto out;
10132                 } else if (new_crtc_state->base.vrr_enabled) {
10133                         config.state = VRR_STATE_ACTIVE_VARIABLE;
10134                 } else {
10135                         config.state = VRR_STATE_INACTIVE;
10136                 }
10137         }
10138 out:
10139         new_crtc_state->freesync_config = config;
10140 }
10141
10142 static void reset_freesync_config_for_crtc(
10143         struct dm_crtc_state *new_crtc_state)
10144 {
10145         new_crtc_state->vrr_supported = false;
10146
10147         memset(&new_crtc_state->vrr_infopacket, 0,
10148                sizeof(new_crtc_state->vrr_infopacket));
10149 }
10150
10151 static bool
10152 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10153                                  struct drm_crtc_state *new_crtc_state)
10154 {
10155         const struct drm_display_mode *old_mode, *new_mode;
10156
10157         if (!old_crtc_state || !new_crtc_state)
10158                 return false;
10159
10160         old_mode = &old_crtc_state->mode;
10161         new_mode = &new_crtc_state->mode;
10162
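             /*
              * The timing counts as "unchanged" for freesync purposes when only
              * the vertical front porch moved: vtotal, vsync_start and vsync_end
              * all differ, while the vsync pulse width (vsync_end - vsync_start)
              * and all horizontal timing parameters are identical.
              */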
10163         if (old_mode->clock       == new_mode->clock &&
10164             old_mode->hdisplay    == new_mode->hdisplay &&
10165             old_mode->vdisplay    == new_mode->vdisplay &&
10166             old_mode->htotal      == new_mode->htotal &&
10167             old_mode->vtotal      != new_mode->vtotal &&
10168             old_mode->hsync_start == new_mode->hsync_start &&
10169             old_mode->vsync_start != new_mode->vsync_start &&
10170             old_mode->hsync_end   == new_mode->hsync_end &&
10171             old_mode->vsync_end   != new_mode->vsync_end &&
10172             old_mode->hskew       == new_mode->hskew &&
10173             old_mode->vscan       == new_mode->vscan &&
10174             (old_mode->vsync_end - old_mode->vsync_start) ==
10175             (new_mode->vsync_end - new_mode->vsync_start))
10176                 return true;
10177
10178         return false;
10179 }
10180
10181 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
10182         uint64_t num, den, res;
10183         struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10184
10185         dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10186
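             /*
              * Fixed refresh rate in µHz: pixel clock (kHz, so * 1000 for Hz and
              * * 1000000 for µHz) divided by the pixels per frame (htotal *
              * vtotal). E.g. the 1080p60 CEA timing, a 148500 kHz clock with
              * htotal 2200 and vtotal 1125, gives
              * 148500000000000 / 2475000 = 60000000 µHz = 60 Hz.
              */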
10187         num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10188         den = (unsigned long long)new_crtc_state->mode.htotal *
10189               (unsigned long long)new_crtc_state->mode.vtotal;
10190
10191         res = div_u64(num, den);
10192         dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10193 }
10194
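/*
 * Create, validate or remove the dc stream for one CRTC during atomic check:
 * on enable, build and validate a new stream for the attached connector and
 * add it to the dc context; on disable, remove and release the old stream.
 * Scaling, ABM, color-management and freesync updates that do not require a
 * full modeset are applied at the end.
 */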
10195 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10196                          struct drm_atomic_state *state,
10197                          struct drm_crtc *crtc,
10198                          struct drm_crtc_state *old_crtc_state,
10199                          struct drm_crtc_state *new_crtc_state,
10200                          bool enable,
10201                          bool *lock_and_validation_needed)
10202 {
10203         struct dm_atomic_state *dm_state = NULL;
10204         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10205         struct dc_stream_state *new_stream;
10206         int ret = 0;
10207
10208         /*
10209          * TODO: Move this "update changed items" code into
10210          * dm_crtc_atomic_check once we get rid of dc_validation_set.
10211          */
10212         struct amdgpu_crtc *acrtc = NULL;
10213         struct amdgpu_dm_connector *aconnector = NULL;
10214         struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10215         struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10216
10217         new_stream = NULL;
10218
10219         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10220         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10221         acrtc = to_amdgpu_crtc(crtc);
10222         aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10223
10224         /* TODO This hack should go away */
10225         if (aconnector && enable) {
10226                 /* Make sure fake sink is created in plug-in scenario */
10227                 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10228                                                             &aconnector->base);
10229                 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10230                                                             &aconnector->base);
10231
10232                 if (IS_ERR(drm_new_conn_state)) {
10233                         ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10234                         goto fail;
10235                 }
10236
10237                 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10238                 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10239
10240                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10241                         goto skip_modeset;
10242
10243                 new_stream = create_validate_stream_for_sink(aconnector,
10244                                                              &new_crtc_state->mode,
10245                                                              dm_new_conn_state,
10246                                                              dm_old_crtc_state->stream);
10247
10248                 /*
10249                  * We can end up with no stream on ACTION_SET if a display
10250                  * was disconnected during S3. In this case it is not an
10251                  * error: the OS will be updated after detection and will
10252                  * do the right thing on the next atomic commit.
10253                  */
10254
10255                 if (!new_stream) {
10256                         DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10257                                         __func__, acrtc->base.base.id);
10258                         ret = -ENOMEM;
10259                         goto fail;
10260                 }
10261
10262                 /*
10263                  * TODO: Check VSDB bits to decide whether this should
10264                  * be enabled or not.
10265                  */
10266                 new_stream->triggered_crtc_reset.enabled =
10267                         dm->force_timing_sync;
10268
10269                 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10270
10271                 ret = fill_hdr_info_packet(drm_new_conn_state,
10272                                            &new_stream->hdr_static_metadata);
10273                 if (ret)
10274                         goto fail;
10275
10276                 /*
10277                  * If we already removed the old stream from the context
10278                  * (and set the new stream to NULL) then we can't reuse
10279                  * the old stream even if the stream and scaling are unchanged.
10280                  * We'll hit the BUG_ON and black screen.
10281                  *
10282                  * TODO: Refactor this function to allow this check to work
10283                  * in all conditions.
10284                  */
10285                 if (dm_new_crtc_state->stream &&
10286                     is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10287                         goto skip_modeset;
10288
10289                 if (dm_new_crtc_state->stream &&
10290                     dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10291                     dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10292                         new_crtc_state->mode_changed = false;
10293                         DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10294                                          new_crtc_state->mode_changed);
10295                 }
10296         }
10297
10298         /* mode_changed flag may get updated above, need to check again */
10299         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10300                 goto skip_modeset;
10301
10302         drm_dbg_state(state->dev,
10303                 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10304                 "planes_changed:%d, mode_changed:%d, active_changed:%d, "
10305                 "connectors_changed:%d\n",
10306                 acrtc->crtc_id,
10307                 new_crtc_state->enable,
10308                 new_crtc_state->active,
10309                 new_crtc_state->planes_changed,
10310                 new_crtc_state->mode_changed,
10311                 new_crtc_state->active_changed,
10312                 new_crtc_state->connectors_changed);
10313
10314         /* Remove stream for any changed/disabled CRTC */
10315         if (!enable) {
10316
10317                 if (!dm_old_crtc_state->stream)
10318                         goto skip_modeset;
10319
10320                 if (dm_new_crtc_state->stream &&
10321                     is_timing_unchanged_for_freesync(new_crtc_state,
10322                                                      old_crtc_state)) {
10323                         new_crtc_state->mode_changed = false;
10324                         DRM_DEBUG_DRIVER(
10325                                 "Mode change not required for front porch change, "
10326                                 "setting mode_changed to %d",
10327                                 new_crtc_state->mode_changed);
10328
10329                         set_freesync_fixed_config(dm_new_crtc_state);
10330
10331                         goto skip_modeset;
10332                 } else if (aconnector &&
10333                            is_freesync_video_mode(&new_crtc_state->mode,
10334                                                   aconnector)) {
10335                         struct drm_display_mode *high_mode;
10336
10337                         high_mode = get_highest_refresh_rate_mode(aconnector, false);
10338                         if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10339                                 set_freesync_fixed_config(dm_new_crtc_state);
10340                         }
10341                 }
10342
10343                 ret = dm_atomic_get_state(state, &dm_state);
10344                 if (ret)
10345                         goto fail;
10346
10347                 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10348                                 crtc->base.id);
10349
10350                 /* i.e. reset mode */
10351                 if (dc_remove_stream_from_ctx(
10352                                 dm->dc,
10353                                 dm_state->context,
10354                                 dm_old_crtc_state->stream) != DC_OK) {
10355                         ret = -EINVAL;
10356                         goto fail;
10357                 }
10358
10359                 dc_stream_release(dm_old_crtc_state->stream);
10360                 dm_new_crtc_state->stream = NULL;
10361
10362                 reset_freesync_config_for_crtc(dm_new_crtc_state);
10363
10364                 *lock_and_validation_needed = true;
10365
10366         } else { /* Add stream for any updated/enabled CRTC */
10367                 /*
10368                  * Quick fix to prevent a NULL pointer dereference on new_stream
10369                  * when MST connectors added in chained mode are not found in the
10370                  * existing crtc_state. TODO: dig out the root cause of this.
10371                  */
10372                 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10373                         goto skip_modeset;
10374
10375                 if (modereset_required(new_crtc_state))
10376                         goto skip_modeset;
10377
10378                 if (modeset_required(new_crtc_state, new_stream,
10379                                      dm_old_crtc_state->stream)) {
10380
10381                         WARN_ON(dm_new_crtc_state->stream);
10382
10383                         ret = dm_atomic_get_state(state, &dm_state);
10384                         if (ret)
10385                                 goto fail;
10386
10387                         dm_new_crtc_state->stream = new_stream;
10388
10389                         dc_stream_retain(new_stream);
10390
10391                         DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10392                                          crtc->base.id);
10393
10394                         if (dc_add_stream_to_ctx(
10395                                         dm->dc,
10396                                         dm_state->context,
10397                                         dm_new_crtc_state->stream) != DC_OK) {
10398                                 ret = -EINVAL;
10399                                 goto fail;
10400                         }
10401
10402                         *lock_and_validation_needed = true;
10403                 }
10404         }
10405
10406 skip_modeset:
10407         /* Release extra reference */
10408         if (new_stream)
10409                  dc_stream_release(new_stream);
10410
10411         /*
10412          * Below we handle only the dc stream updates that do not
10413          * require a full modeset.
10414          */
10415         if (!(enable && aconnector && new_crtc_state->active))
10416                 return 0;
10417         /*
10418          * Given the above conditions, the dc stream state cannot be NULL because:
10419          * 1. We're in the process of enabling CRTCs (the stream was just
10420          *    added to the dc context, or was already in the context),
10421          * 2. The CRTC has a valid connector attached, and
10422          * 3. The CRTC is currently active and enabled.
10423          * => The dc stream state currently exists.
10424          */
10425         BUG_ON(dm_new_crtc_state->stream == NULL);
10426
10427         /* Scaling or underscan settings */
10428         if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10429                                 drm_atomic_crtc_needs_modeset(new_crtc_state))
10430                 update_stream_scaling_settings(
10431                         &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10432
10433         /* ABM settings */
10434         dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10435
10436         /*
10437          * Color management settings. We also update color properties
10438          * when a modeset is needed, to ensure it gets reprogrammed.
10439          */
10440         if (dm_new_crtc_state->base.color_mgmt_changed ||
10441             drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10442                 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10443                 if (ret)
10444                         goto fail;
10445         }
10446
10447         /* Update Freesync settings. */
10448         get_freesync_config_for_crtc(dm_new_crtc_state,
10449                                      dm_new_conn_state);
10450
10451         return ret;
10452
10453 fail:
10454         if (new_stream)
10455                 dc_stream_release(new_stream);
10456         return ret;
10457 }
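/*
 * Editor's note: a minimal sketch of the reference-counting discipline
 * used above. Every owner of a stream (the CRTC state, the DC context)
 * pairs dc_stream_retain() with a dc_stream_release(), and the extra
 * reference held on new_stream during this function is dropped at
 * skip_modeset. The example_stream type below is hypothetical and
 * illustrative only.
 */
#if 0   /* illustrative example, excluded from the build */
#include <stdlib.h>

struct example_stream {
        int refcount;   /* 1 after creation; one count per owner */
};

static void example_stream_retain(struct example_stream *s)
{
        s->refcount++;  /* a new owner now holds the stream */
}

static void example_stream_release(struct example_stream *s)
{
        if (--s->refcount == 0)
                free(s);        /* last reference dropped: destroy */
}
#endif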
10458
10459 static bool should_reset_plane(struct drm_atomic_state *state,
10460                                struct drm_plane *plane,
10461                                struct drm_plane_state *old_plane_state,
10462                                struct drm_plane_state *new_plane_state)
10463 {
10464         struct drm_plane *other;
10465         struct drm_plane_state *old_other_state, *new_other_state;
10466         struct drm_crtc_state *new_crtc_state;
10467         int i;
10468
10469         /*
10470          * TODO: Remove this hack once the checks below are sufficient
10471          * to determine when we need to reset all the planes on
10472          * the stream.
10473          */
10474         if (state->allow_modeset)
10475                 return true;
10476
10477         /* Exit early if we know that we're adding or removing the plane. */
10478         if (old_plane_state->crtc != new_plane_state->crtc)
10479                 return true;
10480
10481         /* old crtc == new_crtc == NULL, plane not in context. */
10482         if (!new_plane_state->crtc)
10483                 return false;
10484
10485         new_crtc_state =
10486                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10487
10488         if (!new_crtc_state)
10489                 return true;
10490
10491         /* CRTC Degamma changes currently require us to recreate planes. */
10492         if (new_crtc_state->color_mgmt_changed)
10493                 return true;
10494
10495         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10496                 return true;
10497
10498         /*
10499          * If there are any new primary or overlay planes being added or
10500          * removed then the z-order can potentially change. To ensure
10501          * correct z-order and pipe acquisition the current DC architecture
10502          * requires us to remove and recreate all existing planes.
10503          *
10504          * TODO: Come up with a more elegant solution for this.
10505          */
10506         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10507                 struct amdgpu_framebuffer *old_afb, *new_afb;
10508                 if (other->type == DRM_PLANE_TYPE_CURSOR)
10509                         continue;
10510
10511                 if (old_other_state->crtc != new_plane_state->crtc &&
10512                     new_other_state->crtc != new_plane_state->crtc)
10513                         continue;
10514
10515                 if (old_other_state->crtc != new_other_state->crtc)
10516                         return true;
10517
10518                 /* Src/dst size and scaling updates. */
10519                 if (old_other_state->src_w != new_other_state->src_w ||
10520                     old_other_state->src_h != new_other_state->src_h ||
10521                     old_other_state->crtc_w != new_other_state->crtc_w ||
10522                     old_other_state->crtc_h != new_other_state->crtc_h)
10523                         return true;
10524
10525                 /* Rotation / mirroring updates. */
10526                 if (old_other_state->rotation != new_other_state->rotation)
10527                         return true;
10528
10529                 /* Blending updates. */
10530                 if (old_other_state->pixel_blend_mode !=
10531                     new_other_state->pixel_blend_mode)
10532                         return true;
10533
10534                 /* Alpha updates. */
10535                 if (old_other_state->alpha != new_other_state->alpha)
10536                         return true;
10537
10538                 /* Colorspace changes. */
10539                 if (old_other_state->color_range != new_other_state->color_range ||
10540                     old_other_state->color_encoding != new_other_state->color_encoding)
10541                         return true;
10542
10543                 /* Framebuffer checks fall at the end. */
10544                 if (!old_other_state->fb || !new_other_state->fb)
10545                         continue;
10546
10547                 /* Pixel format changes can require bandwidth updates. */
10548                 if (old_other_state->fb->format != new_other_state->fb->format)
10549                         return true;
10550
10551                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10552                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10553
10554                 /* Tiling and DCC changes also require bandwidth updates. */
10555                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10556                     old_afb->base.modifier != new_afb->base.modifier)
10557                         return true;
10558         }
10559
10560         return false;
10561 }
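/*
 * Editor's note: the per-plane comparisons above reduce to one question:
 * did anything change that affects bandwidth, z-order, or pipe
 * acquisition? A stripped-down user-space sketch of that predicate,
 * using a hypothetical example_plane type (illustrative only):
 */
#if 0   /* illustrative example, excluded from the build */
#include <stdbool.h>

struct example_plane {
        int src_w, src_h;       /* source rect */
        int crtc_w, crtc_h;     /* destination rect */
        unsigned int rotation;
};

/* Returns true when the change requires recreating the DC plane. */
static bool example_needs_reset(const struct example_plane *old,
                                const struct example_plane *new)
{
        return old->src_w != new->src_w || old->src_h != new->src_h ||
               old->crtc_w != new->crtc_w || old->crtc_h != new->crtc_h ||
               old->rotation != new->rotation;
}
#endif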
10562
10563 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10564                               struct drm_plane_state *new_plane_state,
10565                               struct drm_framebuffer *fb)
10566 {
10567         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10568         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10569         unsigned int pitch;
10570         bool linear;
10571
10572         if (fb->width > new_acrtc->max_cursor_width ||
10573             fb->height > new_acrtc->max_cursor_height) {
10574                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10575                                  fb->width,
10576                                  fb->height);
10577                 return -EINVAL;
10578         }
10579         if (new_plane_state->src_w != fb->width << 16 ||
10580             new_plane_state->src_h != fb->height << 16) {
10581                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10582                 return -EINVAL;
10583         }
10584
10585         /* Pitch in pixels */
10586         pitch = fb->pitches[0] / fb->format->cpp[0];
10587
10588         if (fb->width != pitch) {
10589                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10590                                  fb->width, pitch);
10591                 return -EINVAL;
10592         }
10593
10594         switch (pitch) {
10595         case 64:
10596         case 128:
10597         case 256:
10598                 /* FB pitch is supported by cursor plane */
10599                 break;
10600         default:
10601                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10602                 return -EINVAL;
10603         }
10604
10605         /* Core DRM takes care of checking FB modifiers, so we only need to
10606          * check tiling flags when the FB doesn't have a modifier. */
10607         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10608                 if (adev->family < AMDGPU_FAMILY_AI) {
10609                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10610                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10611                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10612                 } else {
10613                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10614                 }
10615                 if (!linear) {
10616                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
10617                         return -EINVAL;
10618                 }
10619         }
10620
10621         return 0;
10622 }
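/*
 * Editor's note: two conventions used above deserve a concrete example.
 * fb->pitches[] is in bytes, so dividing by cpp (bytes per pixel) gives
 * the pitch in pixels, and plane src_w/src_h are 16.16 fixed point, so
 * a 64 px wide FB must have src_w == 64 << 16. Hypothetical values,
 * illustrative only:
 */
#if 0   /* illustrative example, excluded from the build */
#include <assert.h>

static void example_cursor_fb_math(void)
{
        unsigned int pitch_bytes = 256; /* as stored in fb->pitches[0] */
        unsigned int cpp = 4;           /* ARGB8888: 4 bytes per pixel */
        unsigned int pitch_px = pitch_bytes / cpp;

        assert(pitch_px == 64);                 /* a supported cursor pitch */
        assert((64u << 16) == 0x400000);        /* 64 px in 16.16 fixed point */
}
#endif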
10623
10624 static int dm_update_plane_state(struct dc *dc,
10625                                  struct drm_atomic_state *state,
10626                                  struct drm_plane *plane,
10627                                  struct drm_plane_state *old_plane_state,
10628                                  struct drm_plane_state *new_plane_state,
10629                                  bool enable,
10630                                  bool *lock_and_validation_needed)
10631 {
10632
10633         struct dm_atomic_state *dm_state = NULL;
10634         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10635         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10636         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10637         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10638         struct amdgpu_crtc *new_acrtc;
10639         bool needs_reset;
10640         int ret = 0;
10641
10642
10643         new_plane_crtc = new_plane_state->crtc;
10644         old_plane_crtc = old_plane_state->crtc;
10645         dm_new_plane_state = to_dm_plane_state(new_plane_state);
10646         dm_old_plane_state = to_dm_plane_state(old_plane_state);
10647
10648         if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10649                 if (!enable || !new_plane_crtc ||
10650                         drm_atomic_plane_disabling(plane->state, new_plane_state))
10651                         return 0;
10652
10653                 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10654
10655                 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10656                         DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10657                         return -EINVAL;
10658                 }
10659
10660                 if (new_plane_state->fb) {
10661                         ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10662                                                  new_plane_state->fb);
10663                         if (ret)
10664                                 return ret;
10665                 }
10666
10667                 return 0;
10668         }
10669
10670         needs_reset = should_reset_plane(state, plane, old_plane_state,
10671                                          new_plane_state);
10672
10673         /* Remove any changed/removed planes */
10674         if (!enable) {
10675                 if (!needs_reset)
10676                         return 0;
10677
10678                 if (!old_plane_crtc)
10679                         return 0;
10680
10681                 old_crtc_state = drm_atomic_get_old_crtc_state(
10682                                 state, old_plane_crtc);
10683                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10684
10685                 if (!dm_old_crtc_state->stream)
10686                         return 0;
10687
10688                 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10689                                 plane->base.id, old_plane_crtc->base.id);
10690
10691                 ret = dm_atomic_get_state(state, &dm_state);
10692                 if (ret)
10693                         return ret;
10694
10695                 if (!dc_remove_plane_from_context(
10696                                 dc,
10697                                 dm_old_crtc_state->stream,
10698                                 dm_old_plane_state->dc_state,
10699                                 dm_state->context)) {
10700
10701                         return -EINVAL;
10702                 }
10703
10704
10705                 dc_plane_state_release(dm_old_plane_state->dc_state);
10706                 dm_new_plane_state->dc_state = NULL;
10707
10708                 *lock_and_validation_needed = true;
10709
10710         } else { /* Add new planes */
10711                 struct dc_plane_state *dc_new_plane_state;
10712
10713                 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10714                         return 0;
10715
10716                 if (!new_plane_crtc)
10717                         return 0;
10718
10719                 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10720                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10721
10722                 if (!dm_new_crtc_state->stream)
10723                         return 0;
10724
10725                 if (!needs_reset)
10726                         return 0;
10727
10728                 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10729                 if (ret)
10730                         return ret;
10731
10732                 WARN_ON(dm_new_plane_state->dc_state);
10733
10734                 dc_new_plane_state = dc_create_plane_state(dc);
10735                 if (!dc_new_plane_state)
10736                         return -ENOMEM;
10737
10738                 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10739                                  plane->base.id, new_plane_crtc->base.id);
10740
10741                 ret = fill_dc_plane_attributes(
10742                         drm_to_adev(new_plane_crtc->dev),
10743                         dc_new_plane_state,
10744                         new_plane_state,
10745                         new_crtc_state);
10746                 if (ret) {
10747                         dc_plane_state_release(dc_new_plane_state);
10748                         return ret;
10749                 }
10750
10751                 ret = dm_atomic_get_state(state, &dm_state);
10752                 if (ret) {
10753                         dc_plane_state_release(dc_new_plane_state);
10754                         return ret;
10755                 }
10756
10757                 /*
10758                  * Any atomic check errors that occur after this will
10759                  * not need a release. The plane state will be attached
10760                  * to the stream, and therefore part of the atomic
10761                  * state. It'll be released when the atomic state is
10762                  * cleaned.
10763                  */
10764                 if (!dc_add_plane_to_context(
10765                                 dc,
10766                                 dm_new_crtc_state->stream,
10767                                 dc_new_plane_state,
10768                                 dm_state->context)) {
10769
10770                         dc_plane_state_release(dc_new_plane_state);
10771                         return -EINVAL;
10772                 }
10773
10774                 dm_new_plane_state->dc_state = dc_new_plane_state;
10775
10776                 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
10777
10778                 /* Tell DC to do a full surface update every time there
10779                  * is a plane change. Inefficient, but works for now.
10780                  */
10781                 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10782
10783                 *lock_and_validation_needed = true;
10784         }
10785
10786
10787         return ret;
10788 }
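/*
 * Editor's note: the error handling above follows an ownership-transfer
 * pattern. The freshly created dc_plane_state must be released on every
 * failure path until dc_add_plane_to_context() succeeds; after that the
 * DC context owns the reference and later atomic-check failures must not
 * release it again. A self-contained sketch of that shape, with
 * hypothetical example_* names (illustrative only):
 */
#if 0   /* illustrative example, excluded from the build */
#include <errno.h>
#include <stdlib.h>

struct example_obj { int dummy; };
struct example_ctx { struct example_obj *owned; };

static int example_attach(struct example_ctx *ctx, int fill_fails)
{
        struct example_obj *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return -ENOMEM;

        if (fill_fails) {
                free(obj);      /* still our reference: release it */
                return -EINVAL;
        }

        ctx->owned = obj;       /* ownership transfers to the context */

        /* Errors after this point must NOT free obj; the context
         * releases it when it is cleaned up. */
        return 0;
}
#endif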
10789
10790 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10791                                        int *src_w, int *src_h)
10792 {
10793         switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10794         case DRM_MODE_ROTATE_90:
10795         case DRM_MODE_ROTATE_270:
10796                 *src_w = plane_state->src_h >> 16;
10797                 *src_h = plane_state->src_w >> 16;
10798                 break;
10799         case DRM_MODE_ROTATE_0:
10800         case DRM_MODE_ROTATE_180:
10801         default:
10802                 *src_w = plane_state->src_w >> 16;
10803                 *src_h = plane_state->src_h >> 16;
10804                 break;
10805         }
10806 }
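/*
 * Editor's note: a short usage sketch for the helper above. For 90/270
 * degree rotations the effective source width and height swap, and the
 * 16.16 fixed-point src_w/src_h are shifted down to whole pixels.
 * Hypothetical values, illustrative only:
 */
#if 0   /* illustrative example, excluded from the build */
static void example_oriented_size(void)
{
        struct drm_plane_state ps = {
                .src_w = 1920 << 16,    /* 16.16 fixed point */
                .src_h = 1080 << 16,
                .rotation = DRM_MODE_ROTATE_90,
        };
        int w, h;

        dm_get_oriented_plane_size(&ps, &w, &h);
        /* w == 1080, h == 1920: dimensions swapped by the rotation */
}
#endif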
10807
10808 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10809                                 struct drm_crtc *crtc,
10810                                 struct drm_crtc_state *new_crtc_state)
10811 {
10812         struct drm_plane *cursor = crtc->cursor, *underlying;
10813         struct drm_plane_state *new_cursor_state, *new_underlying_state;
10814         int i;
10815         int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10816         int cursor_src_w, cursor_src_h;
10817         int underlying_src_w, underlying_src_h;
10818
10819         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10820          * cursor per pipe, but it inherits the scaling and positioning from the
10821          * underlying pipe. Check that the cursor plane's scaling matches the
10822          * underlying planes'. */
10823
10824         new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10825         if (!new_cursor_state || !new_cursor_state->fb) {
10826                 return 0;
10827         }
10828
10829         dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10830         cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10831         cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10832
10833         for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10834                 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10835                 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10836                         continue;
10837
10838                 /* Ignore disabled planes */
10839                 if (!new_underlying_state->fb)
10840                         continue;
10841
10842                 dm_get_oriented_plane_size(new_underlying_state,
10843                                            &underlying_src_w, &underlying_src_h);
10844                 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10845                 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10846
10847                 if (cursor_scale_w != underlying_scale_w ||
10848                     cursor_scale_h != underlying_scale_h) {
10849                         drm_dbg_atomic(crtc->dev,
10850                                        "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10851                                        cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10852                         return -EINVAL;
10853                 }
10854
10855                 /* If this plane covers the whole CRTC, no need to check planes underneath */
10856                 if (new_underlying_state->crtc_x <= 0 &&
10857                     new_underlying_state->crtc_y <= 0 &&
10858                     new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10859                     new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10860                         break;
10861         }
10862
10863         return 0;
10864 }
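/*
 * Editor's note: the comparison above works in integer thousandths
 * (1000 == 1.0x) to avoid floating point in the kernel. A minimal
 * sketch of the same scale check (hypothetical values, illustrative
 * only):
 */
#if 0   /* illustrative example, excluded from the build */
#include <stdbool.h>

static bool example_cursor_scale_matches(int cursor_src_w, int cursor_crtc_w,
                                         int plane_src_w, int plane_crtc_w)
{
        int cursor_scale = cursor_crtc_w * 1000 / cursor_src_w;
        int plane_scale = plane_crtc_w * 1000 / plane_src_w;

        return cursor_scale == plane_scale;
}

/* e.g. (64, 64, 1920, 2880) gives 1000 vs 1500: the cursor would not
 * track the 1.5x-scaled plane, so the atomic check rejects it. */
#endif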
10865
10866 #if defined(CONFIG_DRM_AMD_DC_DCN)
10867 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10868 {
10869         struct drm_connector *connector;
10870         struct drm_connector_state *conn_state, *old_conn_state;
10871         struct amdgpu_dm_connector *aconnector = NULL;
10872         int i;
10873         for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10874                 if (!conn_state->crtc)
10875                         conn_state = old_conn_state;
10876
10877                 if (conn_state->crtc != crtc)
10878                         continue;
10879
10880                 aconnector = to_amdgpu_dm_connector(connector);
10881                 if (!aconnector->port || !aconnector->mst_port)
10882                         aconnector = NULL;
10883                 else
10884                         break;
10885         }
10886
10887         if (!aconnector)
10888                 return 0;
10889
10890         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10891 }
10892 #endif
10893
10894 /**
10895  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10896  * @dev: The DRM device
10897  * @state: The atomic state to commit
10898  *
10899  * Validate that the given atomic state is programmable by DC into hardware.
10900  * This involves constructing a &struct dc_state reflecting the new hardware
10901  * state we wish to commit, then querying DC to see if it is programmable. It's
10902  * important not to modify the existing DC state. Otherwise, atomic_check
10903  * may unexpectedly commit hardware changes.
10904  *
10905  * When validating the DC state, it's important that the right locks are
10906  * acquired. For the full-update case, which removes/adds/updates streams on
10907  * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
10908  * that any such full-update commit will wait for completion of any outstanding
10909  * flip using DRM's synchronization events.
10910  *
10911  * Note that DM adds the affected connectors for all CRTCs in state, even when that
10912  * might not seem necessary. This is because DC stream creation requires the
10913  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10914  * be possible but non-trivial - a possible TODO item.
10915  *
10916  * Return: 0 on success, negative error code on failure.
10917  */
10918 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10919                                   struct drm_atomic_state *state)
10920 {
10921         struct amdgpu_device *adev = drm_to_adev(dev);
10922         struct dm_atomic_state *dm_state = NULL;
10923         struct dc *dc = adev->dm.dc;
10924         struct drm_connector *connector;
10925         struct drm_connector_state *old_con_state, *new_con_state;
10926         struct drm_crtc *crtc;
10927         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10928         struct drm_plane *plane;
10929         struct drm_plane_state *old_plane_state, *new_plane_state;
10930         enum dc_status status;
10931         int ret, i;
10932         bool lock_and_validation_needed = false;
10933         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10934 #if defined(CONFIG_DRM_AMD_DC_DCN)
10935         struct dsc_mst_fairness_vars vars[MAX_PIPES];
10936         struct drm_dp_mst_topology_state *mst_state;
10937         struct drm_dp_mst_topology_mgr *mgr;
10938 #endif
10939
10940         trace_amdgpu_dm_atomic_check_begin(state);
10941
10942         ret = drm_atomic_helper_check_modeset(dev, state);
10943         if (ret) {
10944                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10945                 goto fail;
10946         }
10947
10948         /* Check connector changes */
10949         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10950                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10951                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10952
10953                 /* Skip connectors that are disabled or part of modeset already. */
10954                 if (!old_con_state->crtc && !new_con_state->crtc)
10955                         continue;
10956
10957                 if (!new_con_state->crtc)
10958                         continue;
10959
10960                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10961                 if (IS_ERR(new_crtc_state)) {
10962                         DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10963                         ret = PTR_ERR(new_crtc_state);
10964                         goto fail;
10965                 }
10966
10967                 if (dm_old_con_state->abm_level !=
10968                     dm_new_con_state->abm_level)
10969                         new_crtc_state->connectors_changed = true;
10970         }
10971
10972 #if defined(CONFIG_DRM_AMD_DC_DCN)
10973         if (dc_resource_is_dsc_encoding_supported(dc)) {
10974                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10975                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10976                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
10977                                 if (ret) {
10978                                         DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10979                                         goto fail;
10980                                 }
10981                         }
10982                 }
10983                 pre_validate_dsc(state, &dm_state, vars);
10984         }
10985 #endif
10986         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10987                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10988
10989                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10990                     !new_crtc_state->color_mgmt_changed &&
10991                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10992                     !dm_old_crtc_state->dsc_force_changed)
10993                         continue;
10994
10995                 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10996                 if (ret) {
10997                         DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10998                         goto fail;
10999                 }
11000
11001                 if (!new_crtc_state->enable)
11002                         continue;
11003
11004                 ret = drm_atomic_add_affected_connectors(state, crtc);
11005                 if (ret) {
11006                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
11007                         goto fail;
11008                 }
11009
11010                 ret = drm_atomic_add_affected_planes(state, crtc);
11011                 if (ret) {
11012                         DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
11013                         goto fail;
11014                 }
11015
11016                 if (dm_old_crtc_state->dsc_force_changed)
11017                         new_crtc_state->mode_changed = true;
11018         }
11019
11020         /*
11021          * Add all primary and overlay planes on the CRTC to the state
11022          * whenever a plane is enabled to maintain correct z-ordering
11023          * and to enable fast surface updates.
11024          */
11025         drm_for_each_crtc(crtc, dev) {
11026                 bool modified = false;
11027
11028                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
11029                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11030                                 continue;
11031
11032                         if (new_plane_state->crtc == crtc ||
11033                             old_plane_state->crtc == crtc) {
11034                                 modified = true;
11035                                 break;
11036                         }
11037                 }
11038
11039                 if (!modified)
11040                         continue;
11041
11042                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
11043                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
11044                                 continue;
11045
11046                         new_plane_state =
11047                                 drm_atomic_get_plane_state(state, plane);
11048
11049                         if (IS_ERR(new_plane_state)) {
11050                                 ret = PTR_ERR(new_plane_state);
11051                                 DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
11052                                 goto fail;
11053                         }
11054                 }
11055         }
11056
11057         /* Remove existing planes if they are modified */
11058         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11059                 ret = dm_update_plane_state(dc, state, plane,
11060                                             old_plane_state,
11061                                             new_plane_state,
11062                                             false,
11063                                             &lock_and_validation_needed);
11064                 if (ret) {
11065                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11066                         goto fail;
11067                 }
11068         }
11069
11070         /* Disable all crtcs which require disable */
11071         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11072                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11073                                            old_crtc_state,
11074                                            new_crtc_state,
11075                                            false,
11076                                            &lock_and_validation_needed);
11077                 if (ret) {
11078                         DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11079                         goto fail;
11080                 }
11081         }
11082
11083         /* Enable all crtcs which require enable */
11084         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11085                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11086                                            old_crtc_state,
11087                                            new_crtc_state,
11088                                            true,
11089                                            &lock_and_validation_needed);
11090                 if (ret) {
11091                         DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11092                         goto fail;
11093                 }
11094         }
11095
11096         /* Add new/modified planes */
11097         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11098                 ret = dm_update_plane_state(dc, state, plane,
11099                                             old_plane_state,
11100                                             new_plane_state,
11101                                             true,
11102                                             &lock_and_validation_needed);
11103                 if (ret) {
11104                         DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11105                         goto fail;
11106                 }
11107         }
11108
11109         /* Run this here since we want to validate the streams we created */
11110         ret = drm_atomic_helper_check_planes(dev, state);
11111         if (ret) {
11112                 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11113                 goto fail;
11114         }
11115
11116         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11117                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
11118                 if (dm_new_crtc_state->mpo_requested)
11119                         DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
11120         }
11121
11122         /* Check cursor planes scaling */
11123         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11124                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11125                 if (ret) {
11126                         DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11127                         goto fail;
11128                 }
11129         }
11130
11131         if (state->legacy_cursor_update) {
11132                 /*
11133                  * This is a fast cursor update coming from the plane update
11134                  * helper; check if it can be done asynchronously for better
11135                  * performance.
11136                  */
11137                 state->async_update =
11138                         !drm_atomic_helper_async_check(dev, state);
11139
11140                 /*
11141                  * Skip the remaining global validation if this is an async
11142                  * update. Cursor updates can be done without affecting
11143                  * state or bandwidth calcs and this avoids the performance
11144                  * penalty of locking the private state object and
11145                  * allocating a new dc_state.
11146                  */
11147                 if (state->async_update)
11148                         return 0;
11149         }
11150
11151         /* Check scaling and underscan changes */
11152         /* TODO: Removed scaling-changes validation due to the inability to commit
11153          * a new stream into the context w/o causing a full reset. Need to
11154          * decide how to handle this.
11155          */
11156         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11157                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11158                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11159                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11160
11161                 /* Skip any modesets/resets */
11162                 if (!acrtc || drm_atomic_crtc_needs_modeset(
11163                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11164                         continue;
11165
11166                 /* Skip anything that is not a scaling or underscan change */
11167                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11168                         continue;
11169
11170                 lock_and_validation_needed = true;
11171         }
11172
11173 #if defined(CONFIG_DRM_AMD_DC_DCN)
11174         /* Set the slot info for each mst_state based on the link encoding format */
11175         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11176                 struct amdgpu_dm_connector *aconnector;
11177                 struct drm_connector *connector;
11178                 struct drm_connector_list_iter iter;
11179                 u8 link_coding_cap;
11180
11181                 if (!mgr->mst_state)
11182                         continue;
11183
11184                 drm_connector_list_iter_begin(dev, &iter);
11185                 drm_for_each_connector_iter(connector, &iter) {
11186                         int id = connector->index;
11187
11188                         if (id == mst_state->mgr->conn_base_id) {
11189                                 aconnector = to_amdgpu_dm_connector(connector);
11190                                 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11191                                 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11192
11193                                 break;
11194                         }
11195                 }
11196                 drm_connector_list_iter_end(&iter);
11197
11198         }
11199 #endif
11200         /*
11201          * Streams and planes are reset when there are changes that affect
11202          * bandwidth. Anything that affects bandwidth needs to go through
11203          * DC global validation to ensure that the configuration can be applied
11204          * to hardware.
11205          *
11206          * We currently have to stall out here in atomic_check for outstanding
11207          * commits to finish in this case because our IRQ handlers reference
11208          * DRM state directly; we can end up disabling interrupts too early
11209          * if we don't.
11210          *
11211          * TODO: Remove this stall and drop DM state private objects.
11212          */
11213         if (lock_and_validation_needed) {
11214                 ret = dm_atomic_get_state(state, &dm_state);
11215                 if (ret) {
11216                         DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11217                         goto fail;
11218                 }
11219
11220                 ret = do_aquire_global_lock(dev, state);
11221                 if (ret) {
11222                         DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11223                         goto fail;
11224                 }
11225
11226 #if defined(CONFIG_DRM_AMD_DC_DCN)
11227                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11228                         DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
11229                         ret = -EINVAL;
11230                         goto fail;
11231                 }
11232                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11233                 if (ret) {
11234                         DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11235                         goto fail;
11236                 }
11237 #endif
11238
11239                 /*
11240                  * Perform validation of MST topology in the state:
11241                  * We need to perform MST atomic check before calling
11242                  * dc_validate_global_state(), or there is a chance
11243                  * to get stuck in an infinite loop and hang eventually.
11244                  */
11245                 ret = drm_dp_mst_atomic_check(state);
11246                 if (ret) {
11247                         DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11248                         goto fail;
11249                 }
11250                 status = dc_validate_global_state(dc, dm_state->context, true);
11251                 if (status != DC_OK) {
11252                         DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
11253                                        dc_status_to_str(status), status);
11254                         ret = -EINVAL;
11255                         goto fail;
11256                 }
11257         } else {
11258                 /*
11259                  * The commit is a fast update. Fast updates shouldn't change
11260                  * the DC context, affect global validation, and can have their
11261                  * commit work done in parallel with other commits not touching
11262                  * the same resource. If we have a new DC context as part of
11263                  * the DM atomic state from validation we need to free it and
11264                  * retain the existing one instead.
11265                  *
11266                  * Furthermore, since the DM atomic state only contains the DC
11267                  * context and can safely be annulled, we can free the state
11268                  * and clear the associated private object now to free
11269                  * some memory and avoid a possible use-after-free later.
11270                  */
11271
11272                 for (i = 0; i < state->num_private_objs; i++) {
11273                         struct drm_private_obj *obj = state->private_objs[i].ptr;
11274
11275                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
11276                                 int j = state->num_private_objs-1;
11277
11278                                 dm_atomic_destroy_state(obj,
11279                                                 state->private_objs[i].state);
11280
11281                                 /* If i is not at the end of the array then the
11282                                  * last element needs to be moved to where i was
11283                                  * before the array can safely be truncated.
11284                                  */
11285                                 if (i != j)
11286                                         state->private_objs[i] =
11287                                                 state->private_objs[j];
11288
11289                                 state->private_objs[j].ptr = NULL;
11290                                 state->private_objs[j].state = NULL;
11291                                 state->private_objs[j].old_state = NULL;
11292                                 state->private_objs[j].new_state = NULL;
11293
11294                                 state->num_private_objs = j;
11295                                 break;
11296                         }
11297                 }
11298         }
11299
11300         /* Store the overall update type for use later in atomic check. */
11301         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11302                 struct dm_crtc_state *dm_new_crtc_state =
11303                         to_dm_crtc_state(new_crtc_state);
11304
11305                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11306                                                          UPDATE_TYPE_FULL :
11307                                                          UPDATE_TYPE_FAST;
11308         }
11309
11310         /* Must have succeeded by this point */
11311         WARN_ON(ret);
11312
11313         trace_amdgpu_dm_atomic_check_finish(state, ret);
11314
11315         return ret;
11316
11317 fail:
11318         if (ret == -EDEADLK)
11319                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11320         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11321                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11322         else
11323                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11324
11325         trace_amdgpu_dm_atomic_check_finish(state, ret);
11326
11327         return ret;
11328 }
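/*
 * Editor's note: the private-object cleanup in the fast-update path
 * above uses the classic "swap with last, then shrink" array removal:
 * O(1), keeps the array dense, does not preserve order. A
 * self-contained sketch over a plain int array (illustrative only):
 */
#if 0   /* illustrative example, excluded from the build */
static void example_swap_remove(int *arr, int *len, int i)
{
        int j = *len - 1;

        if (i != j)
                arr[i] = arr[j];        /* move the last element into the hole */
        arr[j] = 0;                     /* clear the stale slot */
        *len = j;                       /* truncate the array */
}
#endif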
11329
11330 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11331                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
11332 {
11333         uint8_t dpcd_data;
11334         bool capable = false;
11335
11336         if (amdgpu_dm_connector->dc_link &&
11337                 dm_helpers_dp_read_dpcd(
11338                                 NULL,
11339                                 amdgpu_dm_connector->dc_link,
11340                                 DP_DOWN_STREAM_PORT_COUNT,
11341                                 &dpcd_data,
11342                                 sizeof(dpcd_data))) {
11343                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) != 0;
11344         }
11345
11346         return capable;
11347 }
11348
11349 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11350                 unsigned int offset,
11351                 unsigned int total_length,
11352                 uint8_t *data,
11353                 unsigned int length,
11354                 struct amdgpu_hdmi_vsdb_info *vsdb)
11355 {
11356         bool res;
11357         union dmub_rb_cmd cmd;
11358         struct dmub_cmd_send_edid_cea *input;
11359         struct dmub_cmd_edid_cea_output *output;
11360
11361         if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11362                 return false;
11363
11364         memset(&cmd, 0, sizeof(cmd));
11365
11366         input = &cmd.edid_cea.data.input;
11367
11368         cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11369         cmd.edid_cea.header.sub_type = 0;
11370         cmd.edid_cea.header.payload_bytes =
11371                 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11372         input->offset = offset;
11373         input->length = length;
11374         input->cea_total_length = total_length;
11375         memcpy(input->payload, data, length);
11376
11377         res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11378         if (!res) {
11379                 DRM_ERROR("EDID CEA parser failed\n");
11380                 return false;
11381         }
11382
11383         output = &cmd.edid_cea.data.output;
11384
11385         if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11386                 if (!output->ack.success) {
11387                         DRM_ERROR("EDID CEA ack failed at offset %d\n",
11388                                         output->ack.offset);
11389                 }
11390         } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11391                 if (!output->amd_vsdb.vsdb_found)
11392                         return false;
11393
11394                 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11395                 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11396                 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11397                 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11398         } else {
11399                 DRM_WARN("Unknown EDID CEA parser results\n");
11400                 return false;
11401         }
11402
11403         return true;
11404 }
11405
11406 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11407                 uint8_t *edid_ext, int len,
11408                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11409 {
11410         int i;
11411
11412         /* send extension block to DMCU for parsing */
11413         for (i = 0; i < len; i += 8) {
11414                 bool res;
11415                 int offset;
11416
11417                 /* send 8 bytes at a time */
11418                 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11419                         return false;
11420
11421                 if (i + 8 == len) {
11422                         /* EDID block fully sent; expect the parse result */
11423                         int version, min_rate, max_rate;
11424
11425                         res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11426                         if (res) {
11427                                 /* amd vsdb found */
11428                                 vsdb_info->freesync_supported = 1;
11429                                 vsdb_info->amd_vsdb_version = version;
11430                                 vsdb_info->min_refresh_rate_hz = min_rate;
11431                                 vsdb_info->max_refresh_rate_hz = max_rate;
11432                                 return true;
11433                         }
11434                         /* not amd vsdb */
11435                         return false;
11436                 }
11437
11438                 /* check for ack */
11439                 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11440                 if (!res)
11441                         return false;
11442         }
11443
11444         return false;
11445 }
11446
11447 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11448                 uint8_t *edid_ext, int len,
11449                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11450 {
11451         int i;
11452
11453         /* send extension block to DMUB for parsing */
11454         for (i = 0; i < len; i += 8) {
11455                 /* send 8 bytes at a time */
11456                 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11457                         return false;
11458         }
11459
11460         return vsdb_info->freesync_supported;
11461 }
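/*
 * Editor's note: both parsers above stream the 128-byte CEA extension
 * block to firmware in fixed 8-byte chunks, passing the running offset
 * and the total length so the receiver knows when the block is complete.
 * A generic sketch of that chunking loop, assuming len is a multiple of
 * the chunk size (hypothetical send_chunk callback, illustrative only):
 */
#if 0   /* illustrative example, excluded from the build */
#include <stdbool.h>
#include <stdint.h>

static bool example_send_chunked(const uint8_t *data, int len,
                                 bool (*send_chunk)(int offset, int total,
                                                    const uint8_t *chunk, int n))
{
        int i;

        for (i = 0; i < len; i += 8) {
                /* On the final chunk, offset == total - 8, which lets the
                 * receiver produce its result. */
                if (!send_chunk(i, len, &data[i], 8))
                        return false;
        }
        return true;
}
#endif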
11462
11463 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11464                 uint8_t *edid_ext, int len,
11465                 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11466 {
11467         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11468
11469         if (adev->dm.dmub_srv)
11470                 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11471         else
11472                 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11473 }
11474
11475 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11476                 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11477 {
11478         uint8_t *edid_ext = NULL;
11479         int i;
11480         bool valid_vsdb_found = false;
11481
11482         /*----- drm_find_cea_extension() -----*/
11483         /* No EDID or EDID extensions */
11484         if (edid == NULL || edid->extensions == 0)
11485                 return -ENODEV;
11486
11487         /* Find CEA extension */
11488         for (i = 0; i < edid->extensions; i++) {
11489                 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11490                 if (edid_ext[0] == CEA_EXT)
11491                         break;
11492         }
11493
11494         if (i == edid->extensions)
11495                 return -ENODEV;
11496
11497         /*----- cea_db_offsets() -----*/
11498         if (edid_ext[0] != CEA_EXT)
11499                 return -ENODEV;
11500
11501         valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11502
11503         return valid_vsdb_found ? i : -ENODEV;
11504 }
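/*
 * Editor's note: EDID extension blocks are fixed 128-byte (EDID_LENGTH)
 * pages following the base block, so extension i starts at byte
 * EDID_LENGTH * (i + 1), and a CEA extension is identified by the tag
 * 0x02 in its first byte. A small sketch of that lookup over a raw EDID
 * buffer (illustrative only; the constants mirror EDID_LENGTH/CEA_EXT):
 */
#if 0   /* illustrative example, excluded from the build */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_EDID_LENGTH     128
#define EXAMPLE_CEA_EXT         0x02

static const uint8_t *example_find_cea_ext(const uint8_t *edid, int extensions)
{
        int i;

        for (i = 0; i < extensions; i++) {
                const uint8_t *ext = edid + EXAMPLE_EDID_LENGTH * (i + 1);

                if (ext[0] == EXAMPLE_CEA_EXT)
                        return ext;     /* first CEA extension block */
        }
        return NULL;    /* no CEA extension present */
}
#endif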
11505
11506 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11507                                         struct edid *edid)
11508 {
11509         int i = 0;
11510         struct detailed_timing *timing;
11511         struct detailed_non_pixel *data;
11512         struct detailed_data_monitor_range *range;
11513         struct amdgpu_dm_connector *amdgpu_dm_connector =
11514                         to_amdgpu_dm_connector(connector);
11515         struct dm_connector_state *dm_con_state = NULL;
11516         struct dc_sink *sink;
11517
11518         struct drm_device *dev = connector->dev;
11519         struct amdgpu_device *adev = drm_to_adev(dev);
11520         bool freesync_capable = false;
11521         struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11522
11523         if (!connector->state) {
11524                 DRM_ERROR("%s - Connector has no state", __func__);
11525                 goto update;
11526         }
11527
11528         sink = amdgpu_dm_connector->dc_sink ?
11529                 amdgpu_dm_connector->dc_sink :
11530                 amdgpu_dm_connector->dc_em_sink;
11531
11532         if (!edid || !sink) {
11533                 dm_con_state = to_dm_connector_state(connector->state);
11534
11535                 amdgpu_dm_connector->min_vfreq = 0;
11536                 amdgpu_dm_connector->max_vfreq = 0;
11537                 amdgpu_dm_connector->pixel_clock_mhz = 0;
11538                 connector->display_info.monitor_range.min_vfreq = 0;
11539                 connector->display_info.monitor_range.max_vfreq = 0;
11540                 freesync_capable = false;
11541
11542                 goto update;
11543         }
11544
11545         dm_con_state = to_dm_connector_state(connector->state);
11546
11547         if (!adev->dm.freesync_module)
11548                 goto update;
11549
11550
11551         if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11552                 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11553                 bool edid_check_required = false;
11554
11555                 if (edid) {
11556                         edid_check_required = is_dp_capable_without_timing_msa(
11557                                                 adev->dm.dc,
11558                                                 amdgpu_dm_connector);
11559                 }
11560
11561                 if (edid_check_required && (edid->version > 1 ||
11562                    (edid->version == 1 && edid->revision > 1))) {
11563                         for (i = 0; i < 4; i++) {
11564
11565                                 timing  = &edid->detailed_timings[i];
11566                                 data    = &timing->data.other_data;
11567                                 range   = &data->data.range;
11568                                 /*
11569                                  * Check if monitor has continuous frequency mode
11570                                  */
11571                                 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11572                                         continue;
11573                                 /*
11574                                  * Check for flag range limits only. If flag == 1, then
11575                                  * no additional timing information is provided.
11576                                  * Default GTF, GTF secondary curve and CVT are not
11577                                  * supported.
11578                                  */
11579                                 if (range->flags != 1)
11580                                         continue;
11581
11582                                 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11583                                 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11584                                 amdgpu_dm_connector->pixel_clock_mhz =
11585                                         range->pixel_clock_mhz * 10;
11586
11587                                 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11588                                 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11589
11590                                 break;
11591                         }
11592
11593                         if (amdgpu_dm_connector->max_vfreq -
11594                             amdgpu_dm_connector->min_vfreq > 10) {
11595                                 freesync_capable = true;
11596                         }
11597
11598                 }
11599         } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11600                 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11601                 if (i >= 0 && vsdb_info.freesync_supported) {
11602                         timing  = &edid->detailed_timings[i];
11603                         data    = &timing->data.other_data;
11604
11605                         amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11606                         amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11607                         if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11608                                 freesync_capable = true;
11609
11610                         connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11611                         connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11612                 }
11613         }
11614
11615 update:
11616         if (dm_con_state)
11617                 dm_con_state->freesync_capable = freesync_capable;
11618
11619         if (connector->vrr_capable_property)
11620                 drm_connector_set_vrr_capable_property(connector,
11621                                                        freesync_capable);
11622 }
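/*
 * Editor's note: in both the DP/eDP and HDMI paths above, FreeSync is
 * only advertised when the monitor's refresh range spans more than
 * 10 Hz (max_vfreq - min_vfreq > 10); narrower ranges are treated as
 * too small for variable refresh to be useful. A minimal predicate
 * sketch (illustrative only):
 */
#if 0   /* illustrative example, excluded from the build */
#include <stdbool.h>

static bool example_freesync_capable(int min_vfreq, int max_vfreq)
{
        /* e.g. 48..144 Hz -> true; 59..60 Hz -> false */
        return max_vfreq - min_vfreq > 10;
}
#endif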
11623
11624 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11625 {
11626         struct amdgpu_device *adev = drm_to_adev(dev);
11627         struct dc *dc = adev->dm.dc;
11628         int i;
11629
11630         mutex_lock(&adev->dm.dc_lock);
11631         if (dc->current_state) {
11632                 for (i = 0; i < dc->current_state->stream_count; ++i)
11633                         dc->current_state->streams[i]
11634                                 ->triggered_crtc_reset.enabled =
11635                                 adev->dm.force_timing_sync;
11636
11637                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11638                 dc_trigger_sync(dc, dc->current_state);
11639         }
11640         mutex_unlock(&adev->dm.dc_lock);
11641 }
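/*
 * amdgpu_dm_trigger_timing_sync() pushes adev->dm.force_timing_sync out
 * to every stream in the current DC state under dc_lock. A caller is
 * expected to update the flag first and then invoke the helper, roughly
 * as follows (an illustrative sketch, not the driver's actual debugfs
 * handler):
 *
 *	adev->dm.force_timing_sync = true;
 *	amdgpu_dm_trigger_timing_sync(adev_to_drm(adev));
 */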
11642
11643 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11644                        uint32_t value, const char *func_name)
11645 {
11646 #ifdef DM_CHECK_ADDR_0
11647         if (address == 0) {
11648                 DC_ERR("invalid register write; address = 0\n");
11649                 return;
11650         }
11651 #endif
11652         cgs_write_register(ctx->cgs_device, address, value);
11653         trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11654 }
11655
11656 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11657                           const char *func_name)
11658 {
11659         uint32_t value;
11660 #ifdef DM_CHECK_ADDR_0
11661         if (address == 0) {
11662                 DC_ERR("invalid register read; address = 0\n");
11663                 return 0;
11664         }
11665 #endif
11666
11667         if (ctx->dmub_srv &&
11668             ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11669             !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11670                 ASSERT(false);
11671                 return 0;
11672         }
11673
11674         value = cgs_read_register(ctx->cgs_device, address);
11675
11676         trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11677
11678         return value;
11679 }
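/*
 * All DM register traffic funnels through the two helpers above so every
 * access is traced. A hypothetical read-modify-write through this
 * interface would look like the following (sketch only; real callers go
 * through the REG_* macros, and SOME_ENABLE_MASK is made up):
 *
 *	u32 v = dm_read_reg_func(ctx, addr, __func__);
 *	dm_write_reg_func(ctx, addr, v | SOME_ENABLE_MASK, __func__);
 *
 * Note that the read path deliberately asserts and returns 0 while a
 * DMUB register-offload gather is in progress, since a synchronous read
 * cannot be serviced from a command sequence that is still being batched
 * for the firmware.
 */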
11680
11681 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
11682                                                 struct dc_context *ctx,
11683                                                 uint8_t status_type,
11684                                                 uint32_t *operation_result)
11685 {
11686         struct amdgpu_device *adev = ctx->driver_context;
11687         int return_status = -1;
11688         struct dmub_notification *p_notify = adev->dm.dmub_notify;
11689
11690         if (is_cmd_aux) {
11691                 switch (status_type) {
11692                 case DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS:
11693                         return_status = p_notify->aux_reply.length;
11694                         *operation_result = p_notify->result;
11695                         break;
11696                 case DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT:
11697                         *operation_result = AUX_RET_ERROR_TIMEOUT;
11698                         break;
11699                 case DMUB_ASYNC_TO_SYNC_ACCESS_FAIL:
11700                         *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11701                         break;
11702                 default:
11703                         *operation_result = AUX_RET_ERROR_UNKNOWN;
11704                 }
11705         } else {
11706                 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11707                         return_status = 0;
11708                         *operation_result = p_notify->sc_status;
11709                 } else {
11710                         *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11711                 }
11712         }
11709
11710         return return_status;
11711 }
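/*
 * Summary of the mapping implemented above for the AUX path:
 *
 *	status_type				return		*operation_result
 *	DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS	reply length	p_notify->result
 *	DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT	-1		AUX_RET_ERROR_TIMEOUT
 *	DMUB_ASYNC_TO_SYNC_ACCESS_FAIL		-1		AUX_RET_ERROR_ENGINE_ACQUIRE
 *	anything else				-1		AUX_RET_ERROR_UNKNOWN
 *
 * On the SET_CONFIG path only SUCCESS maps to 0 and p_notify->sc_status;
 * every other status yields -1 and SET_CONFIG_UNKNOWN_ERROR.
 */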
11712
11713 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11714         unsigned int link_index, void *cmd_payload, void *operation_result)
11715 {
11716         struct amdgpu_device *adev = ctx->driver_context;
11717         int ret = 0;
11718
11719         if (is_cmd_aux) {
11720                 dc_process_dmub_aux_transfer_async(ctx->dc,
11721                         link_index, (struct aux_payload *)cmd_payload);
11722         } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11723                                         (struct set_config_cmd_payload *)cmd_payload,
11724                                         adev->dm.dmub_notify)) {
11725                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11726                                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11727                                         (uint32_t *)operation_result);
11728         }
11729
11730         ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11731         if (ret == 0) {
11732                 DRM_ERROR("wait_for_completion_timeout timed out!");
11733                 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11734                                 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11735                                 (uint32_t *)operation_result);
11736         }
11737
11738         if (is_cmd_aux) {
11739                 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11740                         struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11741
11742                         payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11743                         if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11744                             payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11745                                 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11746                                        adev->dm.dmub_notify->aux_reply.length);
11747                         }
11748                 }
11749         }
11750
11751         return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11752                         ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11753                         (uint32_t *)operation_result);
11754 }
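/*
 * A minimal, illustrative AUX-path caller (field values are made up for
 * the example; real callers live in the DM AUX/MST plumbing):
 *
 *	struct aux_payload payload = {
 *		.address = DP_DPCD_REV,	// DPCD register to read
 *		.length  = 1,
 *		.data    = buf,
 *		.write   = false,
 *	};
 *	u32 op_result;
 *	int len = amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
 *				link_index, &payload, &op_result);
 *
 * A negative return indicates a timeout or engine failure, with the
 * detailed cause reported through op_result; otherwise the return value
 * is the reply length.
 */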
11755
11756 /*
11757  * Check whether seamless boot is supported.
11758  *
11759  * So far we only support seamless boot on CHIP_VANGOGH.
11760  * If everything goes well, we may consider expanding
11761  * seamless boot to other ASICs.
11762  */
11763 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11764 {
11765         switch (adev->asic_type) {
11766         case CHIP_VANGOGH:
11767                 if (!adev->mman.keep_stolen_vga_memory)
11768                         return true;
11769                 break;
11770         default:
11771                 break;
11772         }
11773
11774         return false;
11775 }
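/*
 * Roughly how the result is consumed at init time (a sketch; the exact
 * flag names follow the DC init_data populated in amdgpu_dm_init()):
 *
 *	if (check_seamless_boot_capability(adev)) {
 *		init_data.flags.power_down_display_on_boot = false;
 *		init_data.flags.seamless_boot_edp_requested = true;
 *	}
 *
 * When stolen VGA memory must be kept reserved, the pre-OS framebuffer
 * cannot be taken over cleanly, so seamless boot is declined even on
 * CHIP_VANGOGH.
 */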