drm/amd/display: Fix S3 topology change
[sfrench/cifs-2.6.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 #include "dm_services_types.h"
27 #include "dc.h"
28 #include "dc/inc/core_types.h"
29
30 #include "vid.h"
31 #include "amdgpu.h"
32 #include "amdgpu_display.h"
33 #include "atom.h"
34 #include "amdgpu_dm.h"
35 #include "amdgpu_pm.h"
36
37 #include "amd_shared.h"
38 #include "amdgpu_dm_irq.h"
39 #include "dm_helpers.h"
40 #include "dm_services_types.h"
41 #include "amdgpu_dm_mst_types.h"
42
43 #include "ivsrcid/ivsrcid_vislands30.h"
44
45 #include <linux/module.h>
46 #include <linux/moduleparam.h>
47 #include <linux/version.h>
48 #include <linux/types.h>
49
50 #include <drm/drmP.h>
51 #include <drm/drm_atomic.h>
52 #include <drm/drm_atomic_helper.h>
53 #include <drm/drm_dp_mst_helper.h>
54 #include <drm/drm_fb_helper.h>
55 #include <drm/drm_edid.h>
56
57 #include "modules/inc/mod_freesync.h"
58
59 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
60 #include "ivsrcid/irqsrcs_dcn_1_0.h"
61
62 #include "raven1/DCN/dcn_1_0_offset.h"
63 #include "raven1/DCN/dcn_1_0_sh_mask.h"
64 #include "vega10/soc15ip.h"
65
66 #include "soc15_common.h"
67 #endif
68
69 #include "modules/inc/mod_freesync.h"
70
71 #include "i2caux_interface.h"
72
73 /* basic init/fini API */
74 static int amdgpu_dm_init(struct amdgpu_device *adev);
75 static void amdgpu_dm_fini(struct amdgpu_device *adev);
76
77 /* initializes drm_device display related structures, based on the information
78  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
79  * drm_encoder, drm_mode_config
80  *
81  * Returns 0 on success
82  */
83 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
84 /* removes and deallocates the drm structures, created by the above function */
85 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
86
87 static void
88 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
89
90 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
91                                 struct amdgpu_plane *aplane,
92                                 unsigned long possible_crtcs);
93 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
94                                struct drm_plane *plane,
95                                uint32_t link_index);
96 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
97                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
98                                     uint32_t link_index,
99                                     struct amdgpu_encoder *amdgpu_encoder);
100 static int amdgpu_dm_encoder_init(struct drm_device *dev,
101                                   struct amdgpu_encoder *aencoder,
102                                   uint32_t link_index);
103
104 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
105
106 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
107                                    struct drm_atomic_state *state,
108                                    bool nonblock);
109
110 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
111
112 static int amdgpu_dm_atomic_check(struct drm_device *dev,
113                                   struct drm_atomic_state *state);
114
115
116
117
/* Default per-pipe plane types: one primary plane per CRTC, no underlay. */
static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};
126
/* Carrizo plane types: three primaries plus one YUV-capable underlay.
 * NOTE(review): identifier spells "carizzo" (sic) — keep as-is, it is
 * referenced by name elsewhere in this file.
 */
static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
};
133
/* Stoney plane types: two primaries plus one YUV-capable underlay. */
static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};
139
140 /*
141  * dm_vblank_get_counter
142  *
143  * @brief
144  * Get counter for number of vertical blanks
145  *
146  * @param
147  * struct amdgpu_device *adev - [in] desired amdgpu device
148  * int disp_idx - [in] which CRTC to get the counter from
149  *
150  * @return
151  * Counter for vertical blanks
152  */
153 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
154 {
155         if (crtc >= adev->mode_info.num_crtc)
156                 return 0;
157         else {
158                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
159                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
160                                 acrtc->base.state);
161
162
163                 if (acrtc_state->stream == NULL) {
164                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
165                                   crtc);
166                         return 0;
167                 }
168
169                 return dc_stream_get_vblank_counter(acrtc_state->stream);
170         }
171 }
172
173 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
174                                   u32 *vbl, u32 *position)
175 {
176         uint32_t v_blank_start, v_blank_end, h_position, v_position;
177
178         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
179                 return -EINVAL;
180         else {
181                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
182                 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
183                                                 acrtc->base.state);
184
185                 if (acrtc_state->stream ==  NULL) {
186                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
187                                   crtc);
188                         return 0;
189                 }
190
191                 /*
192                  * TODO rework base driver to use values directly.
193                  * for now parse it back into reg-format
194                  */
195                 dc_stream_get_scanoutpos(acrtc_state->stream,
196                                          &v_blank_start,
197                                          &v_blank_end,
198                                          &h_position,
199                                          &v_position);
200
201                 *position = v_position | (h_position << 16);
202                 *vbl = v_blank_start | (v_blank_end << 16);
203         }
204
205         return 0;
206 }
207
/* IP-block is_idle hook: DM has no idle tracking yet, report idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
213
/* IP-block wait_for_idle hook: nothing to wait on yet, always succeeds. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
219
/* IP-block check_soft_reset hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
224
/* IP-block soft_reset hook: no reset procedure implemented yet. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
230
/*
 * get_crtc_by_otg_inst - map an OTG (timing generator) instance to its CRTC.
 *
 * Walks the DRM CRTC list and returns the amdgpu_crtc whose otg_inst matches.
 * Returns NULL if no CRTC matches; for otg_inst == -1 it warns and falls back
 * to CRTC 0 (see comment below).
 */
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * following if is check inherited from both functions where this one is
	 * used now. Need to be checked why it could happen.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}
257
/*
 * dm_pflip_high_irq - page-flip completion interrupt handler.
 *
 * Resolves the CRTC from the IRQ source, and under the device event_lock:
 * sends the pending vblank event (with an accurate count/timestamp), clears
 * the stored event, resets pflip_status to AMDGPU_FLIP_NONE and drops the
 * vblank reference taken when the flip was submitted.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/*TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	/* Spurious interrupt: no flip was submitted for this CRTC. */
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}


	/* wakeup userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
					__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	/* Drop the reference taken when the flip was armed. */
	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
308
309 static void dm_crtc_high_irq(void *interrupt_params)
310 {
311         struct common_irq_params *irq_params = interrupt_params;
312         struct amdgpu_device *adev = irq_params->adev;
313         uint8_t crtc_index = 0;
314         struct amdgpu_crtc *acrtc;
315
316         acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
317
318         if (acrtc)
319                 crtc_index = acrtc->crtc_id;
320
321         drm_handle_vblank(adev->ddev, crtc_index);
322 }
323
324 static int dm_set_clockgating_state(void *handle,
325                   enum amd_clockgating_state state)
326 {
327         return 0;
328 }
329
330 static int dm_set_powergating_state(void *handle,
331                   enum amd_powergating_state state)
332 {
333         return 0;
334 }
335
336 /* Prototypes of private functions */
337 static int dm_early_init(void* handle);
338
/*
 * hotplug_notify_work_func - deferred MST hotplug notification.
 *
 * Scheduled via dm.mst_hotplug_work; forwards the hotplug event to
 * userspace through the DRM KMS helper.
 */
static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}
346
347 #if defined(CONFIG_DRM_AMD_DC_FBC)
348 #include "dal_asic_id.h"
349 /* Allocate memory for FBC compressed data  */
350 /* TODO: Dynamic allocation */
351 #define AMDGPU_FBC_SIZE    (3840 * 2160 * 4)
352
/*
 * amdgpu_dm_initialize_fbc - allocate the VRAM buffer backing FBC.
 *
 * Creates a pinned kernel BO of AMDGPU_FBC_SIZE once (idempotent: skipped
 * if compressor->bo_ptr is already set). On failure only logs an error;
 * compressor fields stay NULL and FBC is effectively disabled.
 */
static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
{
	int r;
	struct dm_comressor_info *compressor = &adev->dm.compressor;

	if (!compressor->bo_ptr) {
		r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
				&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize fbc\n");
	}

}
368 #endif
369
370
371 /* Init display KMS
372  *
373  * Returns 0 on success
374  */
/* Init display KMS
 *
 * Creates the DC (Display Core) instance, the freesync module and all
 * DRM display structures, then initializes vblank support.
 * On any failure jumps to the error label, which tears down whatever was
 * created via amdgpu_dm_fini().
 *
 * Returns 0 on success, -1 on failure.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->mc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	/* CGS gives DC access to registers/clocks through amdgpu. */
	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Verbose DC logging is opt-in via the amdgpu_dc_log module param. */
	if (amdgpu_dc_log)
		init_data.log_mask = DC_DEFAULT_LOG_MASK;
	else
		init_data.log_mask = DC_MIN_LOG_MASK;

#if defined(CONFIG_DRM_AMD_DC_FBC)
	/* FBC buffer is only allocated for Carrizo; gpu_addr stays 0 otherwise. */
	if (adev->family == FAMILY_CZ)
		amdgpu_dm_initialize_fbc(adev);
	init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
#endif
	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized!\n");
	} else {
		DRM_INFO("Display Core failed to initialize!\n");
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	/* Freesync failure is non-fatal: only logged, init continues. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}
479
480 static void amdgpu_dm_fini(struct amdgpu_device *adev)
481 {
482         amdgpu_dm_destroy_drm_device(&adev->dm);
483         /*
484          * TODO: pageflip, vlank interrupt
485          *
486          * amdgpu_dm_irq_fini(adev);
487          */
488
489         if (adev->dm.cgs_device) {
490                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
491                 adev->dm.cgs_device = NULL;
492         }
493         if (adev->dm.freesync_module) {
494                 mod_freesync_destroy(adev->dm.freesync_module);
495                 adev->dm.freesync_module = NULL;
496         }
497         /* DC Destroy TODO: Replace destroy DAL */
498         if (adev->dm.dc)
499                 dc_destroy(&adev->dm.dc);
500         return;
501 }
502
/* IP-block sw_init hook: all DM software init happens in dm_hw_init(). */
static int dm_sw_init(void *handle)
{
	return 0;
}
507
/* IP-block sw_fini hook: nothing to undo, see dm_sw_init(). */
static int dm_sw_fini(void *handle)
{
	return 0;
}
512
513 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
514 {
515         struct amdgpu_dm_connector *aconnector;
516         struct drm_connector *connector;
517         int ret = 0;
518
519         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
520
521         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
522                 aconnector = to_amdgpu_dm_connector(connector);
523                 if (aconnector->dc_link->type == dc_connection_mst_branch) {
524                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
525                                         aconnector, aconnector->base.base.id);
526
527                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
528                         if (ret < 0) {
529                                 DRM_ERROR("DM_MST: Failed to start MST\n");
530                                 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
531                                 return ret;
532                                 }
533                         }
534         }
535
536         drm_modeset_unlock(&dev->mode_config.connection_mutex);
537         return ret;
538 }
539
540 static int dm_late_init(void *handle)
541 {
542         struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
543
544         return detect_mst_link_for_all_connectors(dev);
545 }
546
547 static void s3_handle_mst(struct drm_device *dev, bool suspend)
548 {
549         struct amdgpu_dm_connector *aconnector;
550         struct drm_connector *connector;
551
552         drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
553
554         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
555                    aconnector = to_amdgpu_dm_connector(connector);
556                    if (aconnector->dc_link->type == dc_connection_mst_branch &&
557                                    !aconnector->mst_port) {
558
559                            if (suspend)
560                                    drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
561                            else
562                                    drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
563                    }
564         }
565
566         drm_modeset_unlock(&dev->mode_config.connection_mutex);
567 }
568
/*
 * dm_hw_init - IP-block hw_init hook: bring up the display manager and HPD.
 *
 * NOTE(review): the return value of amdgpu_dm_init() is ignored here; on
 * failure amdgpu_dm_init() already tears itself down via amdgpu_dm_fini(),
 * but the error is not propagated — confirm whether callers should see it.
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}
578
/*
 * dm_hw_fini - IP-block hw_fini hook.
 *
 * Tears down in reverse order of dm_hw_init: HPD first, then IRQ handling,
 * then the display manager itself.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
589
/*
 * dm_suspend - IP-block suspend hook (S3 entry).
 *
 * Suspends MST topology managers and DM IRQs, snapshots the atomic state
 * into dm.cached_state (restored by amdgpu_dm_display_resume), then puts
 * DC into D3.
 *
 * NOTE(review): drm_atomic_helper_suspend() can return an ERR_PTR which is
 * stored in cached_state unchecked — confirm resume path tolerates this.
 */
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	/* cached_state must be NULL here: suspend without a matching resume
	 * would leak the previous snapshot. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}
607
608 static struct amdgpu_dm_connector *
609 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
610                                              struct drm_crtc *crtc)
611 {
612         uint32_t i;
613         struct drm_connector_state *new_con_state;
614         struct drm_connector *connector;
615         struct drm_crtc *crtc_from_state;
616
617         for_each_new_connector_in_state(state, connector, new_con_state, i) {
618                 crtc_from_state = new_con_state->crtc;
619
620                 if (crtc_from_state == crtc)
621                         return to_amdgpu_dm_connector(connector);
622         }
623
624         return NULL;
625 }
626
627 static int dm_resume(void *handle)
628 {
629         struct amdgpu_device *adev = handle;
630         struct amdgpu_display_manager *dm = &adev->dm;
631
632         /* power on hardware */
633         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
634
635         return 0;
636 }
637
/*
 * amdgpu_dm_display_resume - restore display state after S3.
 *
 * Resumes DC and MST, re-detects every (non-MST-port) connector, then
 * replays the atomic state cached by dm_suspend(). Before the replay, the
 * dc_stream/dc_plane references duplicated into the cached state are
 * released so atomic_check can recreate them cleanly, and every CRTC is
 * marked active_changed to force a full mode set.
 *
 * Returns 0 on success or the error from drm_atomic_helper_resume().
 */
int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;

	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection*/
	list_for_each_entry(connector,
			&ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		/* A real sink appeared while we were faking one: stop faking. */
		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			/* Cached state should hold the only reference left. */
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	/* Snapshot consumed; drop our reference taken in dm_suspend(). */
	drm_atomic_state_put(adev->dm.cached_state);
	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}
725
/* amdgpu IP-block callbacks for the display manager. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
743
/* IP-block registration entry for the DCE display engine (version 1.0.0). */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
752
753
754 static struct drm_atomic_state *
755 dm_atomic_state_alloc(struct drm_device *dev)
756 {
757         struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
758
759         if (!state)
760                 return NULL;
761
762         if (drm_atomic_state_init(dev, &state->base) < 0)
763                 goto fail;
764
765         return &state->base;
766
767 fail:
768         kfree(state);
769         return NULL;
770 }
771
772 static void
773 dm_atomic_state_clear(struct drm_atomic_state *state)
774 {
775         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
776
777         if (dm_state->context) {
778                 dc_release_state(dm_state->context);
779                 dm_state->context = NULL;
780         }
781
782         drm_atomic_state_default_clear(state);
783 }
784
/*
 * dm_atomic_state_alloc_free - free a state created by dm_atomic_state_alloc.
 *
 * Default-releases the embedded DRM state, then frees the wrapper.
 */
static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_st = to_dm_atomic_state(state);

	drm_atomic_state_default_release(state);
	kfree(dm_st);
}
792
/* Mode-config callbacks: DC-aware atomic check/commit plus the DM-private
 * atomic state alloc/clear/free trio defined above. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};
802
/* Helper funcs: route the atomic commit tail through DM's implementation. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
806
/*
 * amdgpu_dm_update_connector_after_detect - sync connector state with the
 * DC link's current sink after a detection pass.
 *
 * Handles three cases:
 *  1. Forced (emulated-EDID) connectors: swap between the emulated sink and
 *     the physical one, managing dc_sink refcounts so neither is lost.
 *  2. MST sinks: skipped entirely (managed by the DRM MST framework).
 *  3. Normal hotplug: update dc_sink, the EDID property and the freesync
 *     module registration to match connect/disconnect.
 *
 * Takes dev->mode_config.mutex around all connector property updates.
 */
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;


	sink = aconnector->dc_link->local_sink;

	/* Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Don't do it here if you are during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/* For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
								connector);
				/* retain and release below are used for
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc-to-connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/* We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!! */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/* 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do */
	if (sink) {
		/* TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here. */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		/* Zero-length EDID means the sink provided none; clear ours. */
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;


			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		/* Disconnect: drop EDID, freesync registration and modes. */
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}
911
/* Long-pulse HPD handler; registered with INTERRUPT_LOW_IRQ_CONTEXT in
 * register_hpd_handlers() with the connector passed back as @param.
 *
 * Re-runs link detection and, if the link state changed, refreshes the
 * connector/sink bookkeeping, restores the connector's DRM state, and
 * notifies userspace with a hotplug event.
 */
static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/* In case of failure or MST no need to update connector status or notify the OS
	 * since (for MST case) MST does this in it's own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	/* A real HPD supersedes any previously faked enable. */
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);


		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		/* Forced connectors are managed by the user; only notify
		 * userspace when the connector state is auto-detected. */
		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
940
/* Service a DP short-pulse (HPD RX) downstream IRQ for an MST-capable link.
 *
 * Reads the sink's downstream IRQ status from DPCD — the legacy 0x200
 * range for DPCD rev < 1.2, the ESI 0x2002 range otherwise — hands it to
 * the DRM MST manager, and ACKs each handled IRQ back to the sink.
 * Loops because handling one IRQ can raise another, but bails out after
 * max_process_count iterations to avoid livelocking on a chatty sink.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	/* A short read (dret != dpcd_bytes_to_read) terminates the loop. */
	while (dret == dpcd_bytes_to_read &&
		process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify down stream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			/* AUX writes can fail transiently; retry the ACK a
			 * few times before giving up on it. */
			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is new irq to be handle */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
1018
1019 static void handle_hpd_rx_irq(void *param)
1020 {
1021         struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
1022         struct drm_connector *connector = &aconnector->base;
1023         struct drm_device *dev = connector->dev;
1024         struct dc_link *dc_link = aconnector->dc_link;
1025         bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1026
1027         /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1028          * conflict, after implement i2c helper, this mutex should be
1029          * retired.
1030          */
1031         if (dc_link->type != dc_connection_mst_branch)
1032                 mutex_lock(&aconnector->hpd_lock);
1033
1034         if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
1035                         !is_mst_root_connector) {
1036                 /* Downstream Port status changed. */
1037                 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1038                         amdgpu_dm_update_connector_after_detect(aconnector);
1039
1040
1041                         drm_modeset_lock_all(dev);
1042                         dm_restore_drm_connector_state(dev, connector);
1043                         drm_modeset_unlock_all(dev);
1044
1045                         drm_kms_helper_hotplug_event(dev);
1046                 }
1047         }
1048         if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
1049             (dc_link->type == dc_connection_mst_branch))
1050                 dm_handle_hpd_rx_irq(aconnector);
1051
1052         if (dc_link->type != dc_connection_mst_branch)
1053                 mutex_unlock(&aconnector->hpd_lock);
1054 }
1055
1056 static void register_hpd_handlers(struct amdgpu_device *adev)
1057 {
1058         struct drm_device *dev = adev->ddev;
1059         struct drm_connector *connector;
1060         struct amdgpu_dm_connector *aconnector;
1061         const struct dc_link *dc_link;
1062         struct dc_interrupt_params int_params = {0};
1063
1064         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
1065         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
1066
1067         list_for_each_entry(connector,
1068                         &dev->mode_config.connector_list, head) {
1069
1070                 aconnector = to_amdgpu_dm_connector(connector);
1071                 dc_link = aconnector->dc_link;
1072
1073                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
1074                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1075                         int_params.irq_source = dc_link->irq_source_hpd;
1076
1077                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1078                                         handle_hpd_irq,
1079                                         (void *) aconnector);
1080                 }
1081
1082                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
1083
1084                         /* Also register for DP short pulse (hpd_rx). */
1085                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
1086                         int_params.irq_source = dc_link->irq_source_hpd_rx;
1087
1088                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
1089                                         handle_hpd_rx_irq,
1090                                         (void *) aconnector);
1091                 }
1092         }
1093 }
1094
/* Register IRQ sources and initialize IRQ callbacks for ASICs using the
 * VI "islands" interrupt source IDs (DCE-style numbering).
 *
 * Returns 0 on success or the negative error from amdgpu_irq_add_id().
 */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	/* VEGA10 and RAVEN use the DCE IH client id, not the legacy one. */
	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC callback context, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC page-flip context, indexed by DC irq source. */
		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1178
1179 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks for DCN 1.0 (Raven)
 * interrupt numbering: one VSTARTUP and one HUBP flip source per CRTC.
 *
 * Returns 0 on success or the negative error from amdgpu_irq_add_id().
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/* Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 * */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC callback context, indexed by DC irq source. */
		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		/* Per-CRTC page-flip context, indexed by DC irq source. */
		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
1263 #endif
1264
1265 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
1266 {
1267         int r;
1268
1269         adev->mode_info.mode_config_initialized = true;
1270
1271         adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
1272         adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1273
1274         adev->ddev->mode_config.max_width = 16384;
1275         adev->ddev->mode_config.max_height = 16384;
1276
1277         adev->ddev->mode_config.preferred_depth = 24;
1278         adev->ddev->mode_config.prefer_shadow = 1;
1279         /* indicate support of immediate flip */
1280         adev->ddev->mode_config.async_page_flip = true;
1281
1282         adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1283
1284         r = amdgpu_modeset_create_props(adev);
1285         if (r)
1286                 return r;
1287
1288         return 0;
1289 }
1290
1291 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1292         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1293
1294 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1295 {
1296         struct amdgpu_display_manager *dm = bl_get_data(bd);
1297
1298         if (dc_link_set_backlight_level(dm->backlight_link,
1299                         bd->props.brightness, 0, 0))
1300                 return 0;
1301         else
1302                 return 1;
1303 }
1304
/* Backlight class get_brightness hook: report the last level requested
 * through update_status (cached in props); the hardware is not queried. */
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}
1309
/* Backlight class device operations; brightness is cached in bd->props
 * and forwarded to DC by the update_status hook. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status  = amdgpu_dm_backlight_update_status,
};
1314
1315 static void
1316 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1317 {
1318         char bl_name[16];
1319         struct backlight_properties props = { 0 };
1320
1321         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1322         props.type = BACKLIGHT_RAW;
1323
1324         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1325                         dm->adev->ddev->primary->index);
1326
1327         dm->backlight_dev = backlight_device_register(bl_name,
1328                         dm->adev->ddev->dev,
1329                         dm,
1330                         &amdgpu_dm_backlight_ops,
1331                         &props);
1332
1333         if (IS_ERR(dm->backlight_dev))
1334                 DRM_ERROR("DM: Backlight registration failed!\n");
1335         else
1336                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
1337 }
1338
1339 #endif
1340
/* In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Creates planes, CRTCs, encoders and connectors for every DC link,
 * runs initial (boot-reason) link detection, and registers the
 * per-ASIC interrupt handlers.
 *
 * Returns 0 on success, -1 on failure; partially created allocations
 * are released on the failure path.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	unsigned long possible_crtcs;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	/* One DRM plane per DC plane; indexes below max_streams are the
	 * non-underlay planes, each later paired with its CRTC. */
	for (i = 0; i < dm->dc->caps.max_planes; i++) {
		struct amdgpu_plane *plane;

		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
		mode_info->planes[i] = plane;

		if (!plane) {
			DRM_ERROR("KMS: Failed to allocate plane\n");
			goto fail;
		}
		plane->base.type = mode_info->plane_type[i];

		/*
		 * HACK: IGT tests expect that each plane can only have one
		 * one possible CRTC. For now, set one CRTC for each
		 * plane that is not an underlay, but still allow multiple
		 * CRTCs for underlay planes.
		 */
		possible_crtcs = 1 << i;
		if (i >= dm->dc->caps.max_streams)
			possible_crtcs = 0xff;

		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail;
		}
	}

	/* One CRTC per stream, each using plane i as its primary plane. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		/* Detect whatever is connected at boot so the initial
		 * connector status is populated before userspace asks. */
		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
				DETECT_REASON_BOOT))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		/*
		 * Temporary disable until pplib/smu interaction is implemented
		 */
		dm->dc->debug.disable_stutter = true;
		break;
#endif
	default:
		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	/* kfree(NULL) is a no-op, so freeing unallocated slots (and the
	 * last aconnector/aencoder, whether or not allocated) is safe. */
	kfree(aencoder);
	kfree(aconnector);
	for (i = 0; i < dm->dc->caps.max_planes; i++)
		kfree(mode_info->planes[i]);
	return -1;
}
1478
1479 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1480 {
1481         drm_mode_config_cleanup(dm->ddev);
1482         return;
1483 }
1484
1485 /******************************************************************************
1486  * amdgpu_display_funcs functions
1487  *****************************************************************************/
1488
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 * Intentionally empty for now; presumably DC handles watermark programming
 * internally — TODO confirm before implementing anything here.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1500
/* amdgpu_display_funcs backlight_set_level hook; stub that ignores the
 * request.  Backlight control is instead done through the backlight
 * class device registered elsewhere in this file. */
static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				     u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}
1506
/* amdgpu_display_funcs backlight_get_level hook; stub that always
 * reports level 0 (see TODO below). */
static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}
1512
1513 static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
1514                                 struct drm_file *filp)
1515 {
1516         struct mod_freesync_params freesync_params;
1517         uint8_t num_streams;
1518         uint8_t i;
1519
1520         struct amdgpu_device *adev = dev->dev_private;
1521         int r = 0;
1522
1523         /* Get freesync enable flag from DRM */
1524
1525         num_streams = dc_get_current_stream_count(adev->dm.dc);
1526
1527         for (i = 0; i < num_streams; i++) {
1528                 struct dc_stream_state *stream;
1529                 stream = dc_get_stream_at_index(adev->dm.dc, i);
1530
1531                 mod_freesync_update_state(adev->dm.freesync_module,
1532                                           &stream, 1, &freesync_params);
1533         }
1534
1535         return r;
1536 }
1537
/* Display callbacks handed to the amdgpu base driver.  NULL entries are
 * services DAL/DC provides internally (e.g. VBIOS parsing), so the base
 * driver has nothing to call. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
1556
1557 #if defined(CONFIG_DEBUG_KERNEL_DC)
1558
1559 static ssize_t s3_debug_store(struct device *device,
1560                               struct device_attribute *attr,
1561                               const char *buf,
1562                               size_t count)
1563 {
1564         int ret;
1565         int s3_state;
1566         struct pci_dev *pdev = to_pci_dev(device);
1567         struct drm_device *drm_dev = pci_get_drvdata(pdev);
1568         struct amdgpu_device *adev = drm_dev->dev_private;
1569
1570         ret = kstrtoint(buf, 0, &s3_state);
1571
1572         if (ret == 0) {
1573                 if (s3_state) {
1574                         dm_resume(adev);
1575                         amdgpu_dm_display_resume(adev);
1576                         drm_kms_helper_hotplug_event(adev->ddev);
1577                 } else
1578                         dm_suspend(adev);
1579         }
1580
1581         return ret == 0 ? count : 0;
1582 }
1583
1584 DEVICE_ATTR_WO(s3_debug);
1585
1586 #endif
1587
/* Early IP-block init: flag the KMS driver as atomic, install the DM IRQ
 * functions, and fill in the per-ASIC display resource counts (CRTCs,
 * HPD pins, DIG encoders) plus the plane-type table.  Finally hooks up
 * dm_display_funcs and, in DC debug builds, the s3_debug sysfs file.
 *
 * Returns 0 on success, -EINVAL for an unknown ASIC.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
	amdgpu_dm_set_irq_funcs(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_plane_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_plane_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#endif
	default:
		DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	/* Don't override funcs an earlier IP block may have installed. */
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/* Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init() */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
1681
/*
 * Connector state subclass: wraps the base DRM connector state and adds
 * the amdgpu-specific scaling and underscan properties that user space
 * can set on the connector.
 */
struct dm_connector_state {
	struct drm_connector_state base;

	/* RMX scaling mode selected for this connector (off/aspect/center/...) */
	enum amdgpu_rmx_type scaling;
	/* underscan borders in pixels, applied only when underscan_enable is set */
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
};
1690
/* Upcast from the embedded base DRM connector state to the wrapping dm state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
1693
1694 static bool modeset_required(struct drm_crtc_state *crtc_state,
1695                              struct dc_stream_state *new_stream,
1696                              struct dc_stream_state *old_stream)
1697 {
1698         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1699                 return false;
1700
1701         if (!crtc_state->enable)
1702                 return false;
1703
1704         return crtc_state->active;
1705 }
1706
1707 static bool modereset_required(struct drm_crtc_state *crtc_state)
1708 {
1709         if (!drm_atomic_crtc_needs_modeset(crtc_state))
1710                 return false;
1711
1712         return !crtc_state->enable || !crtc_state->active;
1713 }
1714
/* drm_encoder_funcs.destroy: unregister the encoder and free its memory. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1720
/* Encoder vtable: encoders here are driver-allocated, so destroy kfrees. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
1724
1725 static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
1726                                         struct dc_plane_state *plane_state)
1727 {
1728         plane_state->src_rect.x = state->src_x >> 16;
1729         plane_state->src_rect.y = state->src_y >> 16;
1730         /*we ignore for now mantissa and do not to deal with floating pixels :(*/
1731         plane_state->src_rect.width = state->src_w >> 16;
1732
1733         if (plane_state->src_rect.width == 0)
1734                 return false;
1735
1736         plane_state->src_rect.height = state->src_h >> 16;
1737         if (plane_state->src_rect.height == 0)
1738                 return false;
1739
1740         plane_state->dst_rect.x = state->crtc_x;
1741         plane_state->dst_rect.y = state->crtc_y;
1742
1743         if (state->crtc_w == 0)
1744                 return false;
1745
1746         plane_state->dst_rect.width = state->crtc_w;
1747
1748         if (state->crtc_h == 0)
1749                 return false;
1750
1751         plane_state->dst_rect.height = state->crtc_h;
1752
1753         plane_state->clip_rect = plane_state->dst_rect;
1754
1755         switch (state->rotation & DRM_MODE_ROTATE_MASK) {
1756         case DRM_MODE_ROTATE_0:
1757                 plane_state->rotation = ROTATION_ANGLE_0;
1758                 break;
1759         case DRM_MODE_ROTATE_90:
1760                 plane_state->rotation = ROTATION_ANGLE_90;
1761                 break;
1762         case DRM_MODE_ROTATE_180:
1763                 plane_state->rotation = ROTATION_ANGLE_180;
1764                 break;
1765         case DRM_MODE_ROTATE_270:
1766                 plane_state->rotation = ROTATION_ANGLE_270;
1767                 break;
1768         default:
1769                 plane_state->rotation = ROTATION_ANGLE_0;
1770                 break;
1771         }
1772
1773         return true;
1774 }
1775 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
1776                        uint64_t *tiling_flags,
1777                        uint64_t *fb_location)
1778 {
1779         struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
1780         int r = amdgpu_bo_reserve(rbo, false);
1781
1782         if (unlikely(r)) {
1783                 // Don't show error msg. when return -ERESTARTSYS
1784                 if (r != -ERESTARTSYS)
1785                         DRM_ERROR("Unable to reserve buffer: %d\n", r);
1786                 return r;
1787         }
1788
1789         if (fb_location)
1790                 *fb_location = amdgpu_bo_gpu_offset(rbo);
1791
1792         if (tiling_flags)
1793                 amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
1794
1795         amdgpu_bo_unreserve(rbo);
1796
1797         return r;
1798 }
1799
/*
 * Translate an amdgpu framebuffer into DC plane attributes: pixel format,
 * surface address(es), plane sizes/pitches and tiling info.
 *
 * @adev:      device, used to pick GFX8 vs GFX9 tiling parameters
 * @plane_state: DC plane state to fill
 * @amdgpu_fb: source framebuffer
 * @addReq:    when true, also resolve and program the GPU address
 *
 * Returns 0 on success, -EINVAL for unsupported formats, or the error
 * from reserving the framebuffer BO.
 */
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
					 struct dc_plane_state *plane_state,
					 const struct amdgpu_framebuffer *amdgpu_fb,
					 bool addReq)
{
	uint64_t tiling_flags;
	uint64_t fb_location = 0;
	uint64_t chroma_addr = 0;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	/* Only resolve the GPU address when the caller asked for it. */
	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags,
		addReq == true ? &fb_location:NULL);

	if (ret)
		return ret;

	/* Map the DRM fourcc to the DC surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Packed RGB surface: a single graphics plane. */
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
		plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		/* pitch in pixels, not bytes */
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		/* Semi-planar YUV (NV12/NV21): luma plane followed by an
		 * interleaved chroma plane, rows aligned to 64 pixels. */
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->address.video_progressive.luma_addr.low_part
						= lower_32_bits(fb_location);
		plane_state->address.video_progressive.luma_addr.high_part
						= upper_32_bits(fb_location);
		/* chroma plane starts right after the luma plane */
		chroma_addr = fb_location + (u64)(awidth * fb->height);
		plane_state->address.video_progressive.chroma_addr.low_part
						= lower_32_bits(chroma_addr);
		plane_state->address.video_progressive.chroma_addr.high_part
						= upper_32_bits(chroma_addr);
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}

	memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		plane_state->tiling_info.gfx8.num_banks = num_banks;
		plane_state->tiling_info.gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		plane_state->tiling_info.gfx8.tile_split = tile_split;
		plane_state->tiling_info.gfx8.bank_width = bankw;
		plane_state->tiling_info.gfx8.bank_height = bankh;
		plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
		plane_state->tiling_info.gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	plane_state->tiling_info.gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		plane_state->tiling_info.gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		plane_state->tiling_info.gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		plane_state->tiling_info.gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		plane_state->tiling_info.gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		plane_state->tiling_info.gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		plane_state->tiling_info.gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		plane_state->tiling_info.gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		plane_state->tiling_info.gfx9.shaderEnable = 1;
	}

	plane_state->visible = true;
	plane_state->scaling_quality.h_taps_c = 0;
	plane_state->scaling_quality.v_taps_c = 0;

	/* is this needed? is plane_state zeroed at allocation? */
	plane_state->scaling_quality.h_taps = 0;
	plane_state->scaling_quality.v_taps = 0;
	plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;

	return ret;

}
1956
/*
 * Build a DC gamma table from the CRTC's legacy gamma LUT property and
 * attach it to the plane state.
 *
 * NOTE(review): this reads GAMMA_RGB_256_ENTRIES entries from the
 * gamma_lut blob without checking its length — assumes the property is
 * always a full 256-entry LUT; verify against the callers. The caller is
 * also expected to have checked crtc_state->gamma_lut != NULL.
 * On allocation failure the plane keeps its previous gamma (warn only).
 */
static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
				       struct dc_plane_state *plane_state)
{
	int i;
	struct dc_gamma *gamma;
	struct drm_color_lut *lut =
			(struct drm_color_lut *) crtc_state->gamma_lut->data;

	gamma = dc_create_gamma();

	if (gamma == NULL) {
		WARN_ON(1);
		return;
	}

	gamma->type = GAMMA_RGB_256;
	gamma->num_entries = GAMMA_RGB_256_ENTRIES;
	for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
		/* convert each 16-bit LUT channel to DC fixed point */
		gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
		gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
		gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
	}

	plane_state->gamma_correction = gamma;
}
1982
1983 static int fill_plane_attributes(struct amdgpu_device *adev,
1984                                  struct dc_plane_state *dc_plane_state,
1985                                  struct drm_plane_state *plane_state,
1986                                  struct drm_crtc_state *crtc_state,
1987                                  bool addrReq)
1988 {
1989         const struct amdgpu_framebuffer *amdgpu_fb =
1990                 to_amdgpu_framebuffer(plane_state->fb);
1991         const struct drm_crtc *crtc = plane_state->crtc;
1992         struct dc_transfer_func *input_tf;
1993         int ret = 0;
1994
1995         if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
1996                 return -EINVAL;
1997
1998         ret = fill_plane_attributes_from_fb(
1999                 crtc->dev->dev_private,
2000                 dc_plane_state,
2001                 amdgpu_fb,
2002                 addrReq);
2003
2004         if (ret)
2005                 return ret;
2006
2007         input_tf = dc_create_transfer_func();
2008
2009         if (input_tf == NULL)
2010                 return -ENOMEM;
2011
2012         input_tf->type = TF_TYPE_PREDEFINED;
2013         input_tf->tf = TRANSFER_FUNCTION_SRGB;
2014
2015         dc_plane_state->in_transfer_func = input_tf;
2016
2017         /* In case of gamma set, update gamma value */
2018         if (crtc_state->gamma_lut)
2019                 fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
2020
2021         return ret;
2022 }
2023
2024 /*****************************************************************************/
2025
2026 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
2027                                            const struct dm_connector_state *dm_state,
2028                                            struct dc_stream_state *stream)
2029 {
2030         enum amdgpu_rmx_type rmx_type;
2031
2032         struct rect src = { 0 }; /* viewport in composition space*/
2033         struct rect dst = { 0 }; /* stream addressable area */
2034
2035         /* no mode. nothing to be done */
2036         if (!mode)
2037                 return;
2038
2039         /* Full screen scaling by default */
2040         src.width = mode->hdisplay;
2041         src.height = mode->vdisplay;
2042         dst.width = stream->timing.h_addressable;
2043         dst.height = stream->timing.v_addressable;
2044
2045         rmx_type = dm_state->scaling;
2046         if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
2047                 if (src.width * dst.height <
2048                                 src.height * dst.width) {
2049                         /* height needs less upscaling/more downscaling */
2050                         dst.width = src.width *
2051                                         dst.height / src.height;
2052                 } else {
2053                         /* width needs less upscaling/more downscaling */
2054                         dst.height = src.height *
2055                                         dst.width / src.width;
2056                 }
2057         } else if (rmx_type == RMX_CENTER) {
2058                 dst = src;
2059         }
2060
2061         dst.x = (stream->timing.h_addressable - dst.width) / 2;
2062         dst.y = (stream->timing.v_addressable - dst.height) / 2;
2063
2064         if (dm_state->underscan_enable) {
2065                 dst.x += dm_state->underscan_hborder / 2;
2066                 dst.y += dm_state->underscan_vborder / 2;
2067                 dst.width -= dm_state->underscan_hborder;
2068                 dst.height -= dm_state->underscan_vborder;
2069         }
2070
2071         stream->src = src;
2072         stream->dst = dst;
2073
2074         DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
2075                         dst.x, dst.y, dst.width, dst.height);
2076
2077 }
2078
2079 static enum dc_color_depth
2080 convert_color_depth_from_display_info(const struct drm_connector *connector)
2081 {
2082         uint32_t bpc = connector->display_info.bpc;
2083
2084         /* Limited color depth to 8bit
2085          * TODO: Still need to handle deep color
2086          */
2087         if (bpc > 8)
2088                 bpc = 8;
2089
2090         switch (bpc) {
2091         case 0:
2092                 /* Temporary Work around, DRM don't parse color depth for
2093                  * EDID revision before 1.4
2094                  * TODO: Fix edid parsing
2095                  */
2096                 return COLOR_DEPTH_888;
2097         case 6:
2098                 return COLOR_DEPTH_666;
2099         case 8:
2100                 return COLOR_DEPTH_888;
2101         case 10:
2102                 return COLOR_DEPTH_101010;
2103         case 12:
2104                 return COLOR_DEPTH_121212;
2105         case 14:
2106                 return COLOR_DEPTH_141414;
2107         case 16:
2108                 return COLOR_DEPTH_161616;
2109         default:
2110                 return COLOR_DEPTH_UNDEFINED;
2111         }
2112 }
2113
2114 static enum dc_aspect_ratio
2115 get_aspect_ratio(const struct drm_display_mode *mode_in)
2116 {
2117         int32_t width = mode_in->crtc_hdisplay * 9;
2118         int32_t height = mode_in->crtc_vdisplay * 16;
2119
2120         if ((width - height) < 10 && (width - height) > -10)
2121                 return ASPECT_RATIO_16_9;
2122         else
2123                 return ASPECT_RATIO_4_3;
2124 }
2125
2126 static enum dc_color_space
2127 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
2128 {
2129         enum dc_color_space color_space = COLOR_SPACE_SRGB;
2130
2131         switch (dc_crtc_timing->pixel_encoding) {
2132         case PIXEL_ENCODING_YCBCR422:
2133         case PIXEL_ENCODING_YCBCR444:
2134         case PIXEL_ENCODING_YCBCR420:
2135         {
2136                 /*
2137                  * 27030khz is the separation point between HDTV and SDTV
2138                  * according to HDMI spec, we use YCbCr709 and YCbCr601
2139                  * respectively
2140                  */
2141                 if (dc_crtc_timing->pix_clk_khz > 27030) {
2142                         if (dc_crtc_timing->flags.Y_ONLY)
2143                                 color_space =
2144                                         COLOR_SPACE_YCBCR709_LIMITED;
2145                         else
2146                                 color_space = COLOR_SPACE_YCBCR709;
2147                 } else {
2148                         if (dc_crtc_timing->flags.Y_ONLY)
2149                                 color_space =
2150                                         COLOR_SPACE_YCBCR601_LIMITED;
2151                         else
2152                                 color_space = COLOR_SPACE_YCBCR601;
2153                 }
2154
2155         }
2156         break;
2157         case PIXEL_ENCODING_RGB:
2158                 color_space = COLOR_SPACE_SRGB;
2159                 break;
2160
2161         default:
2162                 WARN_ON(1);
2163                 break;
2164         }
2165
2166         return color_space;
2167 }
2168
2169 /*****************************************************************************/
2170
2171 static void
2172 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
2173                                              const struct drm_display_mode *mode_in,
2174                                              const struct drm_connector *connector)
2175 {
2176         struct dc_crtc_timing *timing_out = &stream->timing;
2177
2178         memset(timing_out, 0, sizeof(struct dc_crtc_timing));
2179
2180         timing_out->h_border_left = 0;
2181         timing_out->h_border_right = 0;
2182         timing_out->v_border_top = 0;
2183         timing_out->v_border_bottom = 0;
2184         /* TODO: un-hardcode */
2185
2186         if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
2187                         && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
2188                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
2189         else
2190                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
2191
2192         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
2193         timing_out->display_color_depth = convert_color_depth_from_display_info(
2194                         connector);
2195         timing_out->scan_type = SCANNING_TYPE_NODATA;
2196         timing_out->hdmi_vic = 0;
2197         timing_out->vic = drm_match_cea_mode(mode_in);
2198
2199         timing_out->h_addressable = mode_in->crtc_hdisplay;
2200         timing_out->h_total = mode_in->crtc_htotal;
2201         timing_out->h_sync_width =
2202                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
2203         timing_out->h_front_porch =
2204                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
2205         timing_out->v_total = mode_in->crtc_vtotal;
2206         timing_out->v_addressable = mode_in->crtc_vdisplay;
2207         timing_out->v_front_porch =
2208                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
2209         timing_out->v_sync_width =
2210                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
2211         timing_out->pix_clk_khz = mode_in->crtc_clock;
2212         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
2213         if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
2214                 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
2215         if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
2216                 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
2217
2218         stream->output_color_space = get_output_color_space(timing_out);
2219
2220         {
2221                 struct dc_transfer_func *tf = dc_create_transfer_func();
2222
2223                 tf->type = TF_TYPE_PREDEFINED;
2224                 tf->tf = TRANSFER_FUNCTION_SRGB;
2225                 stream->out_transfer_func = tf;
2226         }
2227 }
2228
/*
 * Copy audio capabilities from the sink's parsed EDID caps (and the DRM
 * connector's latency info) into the stream's audio_info.
 *
 * NOTE(review): the strncpy below copies at most SIZE-1 bytes and does
 * not itself NUL-terminate — this relies on audio_info being
 * zero-initialized by the caller; verify at the call sites.
 */
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strncpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);

	/* Short Audio Descriptors are only present from CEA revision 3 on. */
	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}
2273
/*
 * Copy only the hardware (crtc_*) timing fields from one mode to another,
 * leaving the user-visible mode fields (hdisplay, clock, ...) untouched.
 */
static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}
2293
2294 static void
2295 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
2296                                         const struct drm_display_mode *native_mode,
2297                                         bool scale_enabled)
2298 {
2299         if (scale_enabled) {
2300                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2301         } else if (native_mode->clock == drm_mode->clock &&
2302                         native_mode->htotal == drm_mode->htotal &&
2303                         native_mode->vtotal == drm_mode->vtotal) {
2304                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
2305         } else {
2306                 /* no scaling nor amdgpu inserted, no need to patch */
2307         }
2308 }
2309
2310 static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
2311 {
2312         struct dc_sink *sink = NULL;
2313         struct dc_sink_init_data sink_init_data = { 0 };
2314
2315         sink_init_data.link = aconnector->dc_link;
2316         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
2317
2318         sink = dc_sink_create(&sink_init_data);
2319         if (!sink) {
2320                 DRM_ERROR("Failed to create sink!\n");
2321                 return -ENOMEM;
2322         }
2323
2324         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
2325         aconnector->fake_enable = true;
2326
2327         aconnector->dc_sink = sink;
2328         aconnector->dc_link->local_sink = sink;
2329
2330         return 0;
2331 }
2332
/*
 * Create a DC stream for the connector's sink, using @drm_mode patched
 * against the connector's preferred (native) mode and the dm state's
 * scaling/underscan settings.
 *
 * Returns the new stream, or NULL on any failure (invalid arguments, MST
 * connector with no sink, fake-sink or stream allocation failure).
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	const struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	/* work on a local copy so the caller's mode is never modified */
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (dm_state == NULL) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/*
		 * Exclude MST from creating fake_sink
		 * TODO: need to enable MST into fake_sink feature
		 */
		if (aconnector->mst_port)
			goto stream_create_fail;

		if (create_fake_sink(aconnector))
			goto stream_create_fail;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
	}

	/* Find the connector's preferred mode to use as the native timing. */
	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	/* fallthrough labels: stream is NULL on the early-error paths */
stream_create_fail:
dm_state_null:
drm_connector_null:
	return stream;
}
2415
/* drm_crtc_funcs.destroy hook: tear down the DRM core CRTC state and free
 * the embedding amdgpu_crtc allocation (the drm_crtc is embedded in it).
 */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
2421
2422 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
2423                                   struct drm_crtc_state *state)
2424 {
2425         struct dm_crtc_state *cur = to_dm_crtc_state(state);
2426
2427         /* TODO Destroy dc_stream objects are stream object is flattened */
2428         if (cur->stream)
2429                 dc_stream_release(cur->stream);
2430
2431
2432         __drm_atomic_helper_crtc_destroy_state(state);
2433
2434
2435         kfree(state);
2436 }
2437
/* drm_crtc_funcs.reset hook: throw away any existing atomic state and
 * install a fresh, zeroed dm_crtc_state on the CRTC.
 */
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	/* Free the previous state (also drops its dc_stream reference) */
	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;		/* leaves crtc->state NULL on OOM */

	crtc->state = &state->base;
	crtc->state->crtc = crtc;

}
2453
2454 static struct drm_crtc_state *
2455 dm_crtc_duplicate_state(struct drm_crtc *crtc)
2456 {
2457         struct dm_crtc_state *state, *cur;
2458
2459         cur = to_dm_crtc_state(crtc->state);
2460
2461         if (WARN_ON(!crtc->state))
2462                 return NULL;
2463
2464         state = kzalloc(sizeof(*state), GFP_KERNEL);
2465         if (!state)
2466                 return NULL;
2467
2468         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
2469
2470         if (cur->stream) {
2471                 state->stream = cur->stream;
2472                 dc_stream_retain(state->stream);
2473         }
2474
2475         /* TODO Duplicate dc_stream after objects are stream object is flattened */
2476
2477         return &state->base;
2478 }
2479
/* Implemented only the options currently availible for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	/* Atomic state lifecycle is handled by the DM wrappers above so the
	 * dc_stream reference count stays correct across duplicate/destroy. */
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
};
2490
2491 static enum drm_connector_status
2492 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
2493 {
2494         bool connected;
2495         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
2496
2497         /* Notes:
2498          * 1. This interface is NOT called in context of HPD irq.
2499          * 2. This interface *is called* in context of user-mode ioctl. Which
2500          * makes it a bad place for *any* MST-related activit. */
2501
2502         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
2503             !aconnector->fake_enable)
2504                 connected = (aconnector->dc_sink != NULL);
2505         else
2506                 connected = (aconnector->base.force == DRM_FORCE_ON);
2507
2508         return (connected ? connector_status_connected :
2509                         connector_status_disconnected);
2510 }
2511
2512 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
2513                                             struct drm_connector_state *connector_state,
2514                                             struct drm_property *property,
2515                                             uint64_t val)
2516 {
2517         struct drm_device *dev = connector->dev;
2518         struct amdgpu_device *adev = dev->dev_private;
2519         struct dm_connector_state *dm_old_state =
2520                 to_dm_connector_state(connector->state);
2521         struct dm_connector_state *dm_new_state =
2522                 to_dm_connector_state(connector_state);
2523
2524         int ret = -EINVAL;
2525
2526         if (property == dev->mode_config.scaling_mode_property) {
2527                 enum amdgpu_rmx_type rmx_type;
2528
2529                 switch (val) {
2530                 case DRM_MODE_SCALE_CENTER:
2531                         rmx_type = RMX_CENTER;
2532                         break;
2533                 case DRM_MODE_SCALE_ASPECT:
2534                         rmx_type = RMX_ASPECT;
2535                         break;
2536                 case DRM_MODE_SCALE_FULLSCREEN:
2537                         rmx_type = RMX_FULL;
2538                         break;
2539                 case DRM_MODE_SCALE_NONE:
2540                 default:
2541                         rmx_type = RMX_OFF;
2542                         break;
2543                 }
2544
2545                 if (dm_old_state->scaling == rmx_type)
2546                         return 0;
2547
2548                 dm_new_state->scaling = rmx_type;
2549                 ret = 0;
2550         } else if (property == adev->mode_info.underscan_hborder_property) {
2551                 dm_new_state->underscan_hborder = val;
2552                 ret = 0;
2553         } else if (property == adev->mode_info.underscan_vborder_property) {
2554                 dm_new_state->underscan_vborder = val;
2555                 ret = 0;
2556         } else if (property == adev->mode_info.underscan_property) {
2557                 dm_new_state->underscan_enable = val;
2558                 ret = 0;
2559         }
2560
2561         return ret;
2562 }
2563
2564 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
2565                                             const struct drm_connector_state *state,
2566                                             struct drm_property *property,
2567                                             uint64_t *val)
2568 {
2569         struct drm_device *dev = connector->dev;
2570         struct amdgpu_device *adev = dev->dev_private;
2571         struct dm_connector_state *dm_state =
2572                 to_dm_connector_state(state);
2573         int ret = -EINVAL;
2574
2575         if (property == dev->mode_config.scaling_mode_property) {
2576                 switch (dm_state->scaling) {
2577                 case RMX_CENTER:
2578                         *val = DRM_MODE_SCALE_CENTER;
2579                         break;
2580                 case RMX_ASPECT:
2581                         *val = DRM_MODE_SCALE_ASPECT;
2582                         break;
2583                 case RMX_FULL:
2584                         *val = DRM_MODE_SCALE_FULLSCREEN;
2585                         break;
2586                 case RMX_OFF:
2587                 default:
2588                         *val = DRM_MODE_SCALE_NONE;
2589                         break;
2590                 }
2591                 ret = 0;
2592         } else if (property == adev->mode_info.underscan_hborder_property) {
2593                 *val = dm_state->underscan_hborder;
2594                 ret = 0;
2595         } else if (property == adev->mode_info.underscan_vborder_property) {
2596                 *val = dm_state->underscan_vborder;
2597                 ret = 0;
2598         } else if (property == adev->mode_info.underscan_property) {
2599                 *val = dm_state->underscan_enable;
2600                 ret = 0;
2601         }
2602         return ret;
2603 }
2604
/* drm_connector_funcs.destroy hook: unregister the backlight device for
 * eDP/LVDS panels (when backlight support is built in), then unregister and
 * free the connector.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = connector->dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* NOTE(review): register-then-unregister looks odd; presumably
		 * this forces the backlight device into existence so it can be
		 * torn down unconditionally — confirm against
		 * amdgpu_dm_register_backlight_device() semantics. */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev) {
			backlight_device_unregister(dm->backlight_dev);
			dm->backlight_dev = NULL;
		}

	}
#endif
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
2628
/* drm_connector_funcs.reset hook: discard any existing connector state and
 * install a fresh dm_connector_state with scaling and underscan disabled.
 */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	/* NOTE(review): frees the old state directly instead of going through
	 * __drm_atomic_helper_connector_destroy_state(); fine while the base
	 * state holds no extra references — confirm if that changes.
	 * kfree(NULL) is a no-op, so a NULL connector->state is safe here. */
	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;

		connector->state = &state->base;
		connector->state->connector = connector;
	}
}
2648
2649 struct drm_connector_state *
2650 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
2651 {
2652         struct dm_connector_state *state =
2653                 to_dm_connector_state(connector->state);
2654
2655         struct dm_connector_state *new_state =
2656                         kmemdup(state, sizeof(*state), GFP_KERNEL);
2657
2658         if (new_state) {
2659                 __drm_atomic_helper_connector_duplicate_state(connector,
2660                                                               &new_state->base);
2661                 return &new_state->base;
2662         }
2663
2664         return NULL;
2665 }
2666
/* Connector vtable: mostly DRM atomic helpers, with DM-specific state
 * reset/duplicate and property handling wired in above. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
2677
2678 static struct drm_encoder *best_encoder(struct drm_connector *connector)
2679 {
2680         int enc_id = connector->encoder_ids[0];
2681         struct drm_mode_object *obj;
2682         struct drm_encoder *encoder;
2683
2684         DRM_DEBUG_DRIVER("Finding the best encoder\n");
2685
2686         /* pick the encoder ids */
2687         if (enc_id) {
2688                 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
2689                 if (!obj) {
2690                         DRM_ERROR("Couldn't find a matching encoder for our connector\n");
2691                         return NULL;
2692                 }
2693                 encoder = obj_to_encoder(obj);
2694                 return encoder;
2695         }
2696         DRM_ERROR("No encoder id\n");
2697         return NULL;
2698 }
2699
/* drm_connector_helper_funcs.get_modes thin wrapper; see the comment on
 * amdgpu_dm_connector_helper_funcs for why the mode list is rebuilt here. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
2704
2705 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2706 {
2707         struct dc_sink_init_data init_params = {
2708                         .link = aconnector->dc_link,
2709                         .sink_signal = SIGNAL_TYPE_VIRTUAL
2710         };
2711         struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
2712
2713         if (!aconnector->base.edid_blob_ptr ||
2714                 !aconnector->base.edid_blob_ptr->data) {
2715                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
2716                                 aconnector->base.name);
2717
2718                 aconnector->base.force = DRM_FORCE_OFF;
2719                 aconnector->base.override_edid = false;
2720                 return;
2721         }
2722
2723         aconnector->edid = edid;
2724
2725         aconnector->dc_em_sink = dc_link_add_remote_sink(
2726                 aconnector->dc_link,
2727                 (uint8_t *)edid,
2728                 (edid->extensions + 1) * EDID_LENGTH,
2729                 &init_params);
2730
2731         if (aconnector->base.force == DRM_FORCE_ON)
2732                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
2733                 aconnector->dc_link->local_sink :
2734                 aconnector->dc_em_sink;
2735 }
2736
/* One-time EDID management setup for forced connectors: seed DP link caps so
 * an initial modeset is possible, then build the emulated sink from the
 * overriding EDID.
 */
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In case of headless boot with force on for DP managed connector
	 * Those settings have to be != 0 to get initial modeset
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}


	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}
2753
/* drm_connector_helper_funcs.mode_valid hook: reject interlaced/doublescan
 * modes outright, then build a throwaway dc_stream for the mode and ask DC
 * whether it validates against the sink.
 *
 * Returns MODE_OK if DC accepts the stream, MODE_ERROR otherwise.
 */
int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Interlaced and doublescan modes are not supported by DC */
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called to initilialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	/* Build a temporary stream that mirrors what a real modeset would
	 * program, so validation matches actual behavior. */
	stream = dc_create_stream_for_sink(dc_sink);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	/* No scaling: source and destination rects both match the mode */
	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
		result = MODE_OK;

	dc_stream_release(stream);

fail:
	/* TODO: error handling*/
	return result;
}
2804
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplug a second bigger display in FB Con mode, bigger resolution
	 * modes will be filtered by drm_mode_validate_size(), and those modes
	 * is missing after user start lightdm. So we need to renew modes list.
	 * in get_modes call back, not just return the modes count
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.best_encoder = best_encoder
};
2817
/* Intentionally empty: CRTC disable is driven through DC during atomic
 * commit, but the helper vtable requires a .disable callback. */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
2821
2822 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
2823                                        struct drm_crtc_state *state)
2824 {
2825         struct amdgpu_device *adev = crtc->dev->dev_private;
2826         struct dc *dc = adev->dm.dc;
2827         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
2828         int ret = -EINVAL;
2829
2830         if (unlikely(!dm_crtc_state->stream &&
2831                      modeset_required(state, NULL, dm_crtc_state->stream))) {
2832                 WARN_ON(1);
2833                 return ret;
2834         }
2835
2836         /* In some use cases, like reset, no stream  is attached */
2837         if (!dm_crtc_state->stream)
2838                 return 0;
2839
2840         if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
2841                 return 0;
2842
2843         return ret;
2844 }
2845
/* drm_crtc_helper_funcs.mode_fixup hook: no fixup needed, DC handles timing
 * adjustments itself, so accept every mode unchanged. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}
2852
/* CRTC helper vtable: stubs plus the DC-backed atomic_check above. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup
};
2858
/* Intentionally empty: encoder disable is handled by DC, but the helper
 * vtable requires a .disable callback. */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
2863
/* drm_encoder_helper_funcs.atomic_check hook: nothing encoder-specific to
 * validate; stream/plane validation happens in the CRTC and plane hooks. */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	return 0;
}
2870
/* Encoder helper vtable: both hooks are no-ops, see comments above. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
2875
2876 static void dm_drm_plane_reset(struct drm_plane *plane)
2877 {
2878         struct dm_plane_state *amdgpu_state = NULL;
2879
2880         if (plane->state)
2881                 plane->funcs->atomic_destroy_state(plane, plane->state);
2882
2883         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
2884         WARN_ON(amdgpu_state == NULL);
2885         
2886         if (amdgpu_state) {
2887                 plane->state = &amdgpu_state->base;
2888                 plane->state->plane = plane;
2889                 plane->state->rotation = DRM_MODE_ROTATE_0;
2890         }
2891 }
2892
2893 static struct drm_plane_state *
2894 dm_drm_plane_duplicate_state(struct drm_plane *plane)
2895 {
2896         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
2897
2898         old_dm_plane_state = to_dm_plane_state(plane->state);
2899         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
2900         if (!dm_plane_state)
2901                 return NULL;
2902
2903         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
2904
2905         if (old_dm_plane_state->dc_state) {
2906                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
2907                 dc_plane_state_retain(dm_plane_state->dc_state);
2908         }
2909
2910         return &dm_plane_state->base;
2911 }
2912
2913 void dm_drm_plane_destroy_state(struct drm_plane *plane,
2914                                 struct drm_plane_state *state)
2915 {
2916         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
2917
2918         if (dm_plane_state->dc_state)
2919                 dc_plane_state_release(dm_plane_state->dc_state);
2920
2921         drm_atomic_helper_plane_destroy_state(plane, state);
2922 }
2923
/* Plane vtable: DRM atomic helpers for update/disable, DM wrappers for
 * state lifecycle so the dc_plane_state refcount stays correct. */
static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};
2932
/* drm_plane_helper_funcs.prepare_fb hook: pin the framebuffer BO into VRAM
 * and propagate its GPU address into the new dc_plane_state (single address
 * for RGB surfaces, separate luma/chroma addresses for video formats).
 *
 * Returns 0 on success (including the no-FB case), or the bo reserve/pin
 * error code.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	int r;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	/* Disabling a plane: nothing to pin */
	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	/* Pin into VRAM; afb->address receives the GPU address */
	r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);


	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		return r;
	}

	/* Hold a reference for the lifetime of the plane state;
	 * released in cleanup_fb via amdgpu_bo_unref. */
	amdgpu_bo_ref(rbo);

	/* Only program addresses when this state actually brings a new
	 * dc_plane_state (avoids clobbering the still-active old state). */
	if (dm_plane_state_new->dc_state &&
			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			/* Video surface: chroma plane follows the 64-byte
			 * aligned luma plane in the same BO */
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
							= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
							= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
			plane_state->address.video_progressive.chroma_addr.low_part
							= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
							= upper_32_bits(chroma_addr);
		}
	}

	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
	 * prepare and cleanup in drm_atomic_helper_prepare_planes
	 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
	 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
	 * code touching fram buffers should be avoided for DC.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);

		acrtc->cursor_bo = obj;
	}
	return 0;
}
3008
3009 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
3010                                        struct drm_plane_state *old_state)
3011 {
3012         struct amdgpu_bo *rbo;
3013         struct amdgpu_framebuffer *afb;
3014         int r;
3015
3016         if (!old_state->fb)
3017                 return;
3018
3019         afb = to_amdgpu_framebuffer(old_state->fb);
3020         rbo = gem_to_amdgpu_bo(afb->obj);
3021         r = amdgpu_bo_reserve(rbo, false);
3022         if (unlikely(r)) {
3023                 DRM_ERROR("failed to reserve rbo before unpin\n");
3024                 return;
3025         }
3026
3027         amdgpu_bo_unpin(rbo);
3028         amdgpu_bo_unreserve(rbo);
3029         amdgpu_bo_unref(&rbo);
3030 }
3031
3032 static int dm_plane_atomic_check(struct drm_plane *plane,
3033                                  struct drm_plane_state *state)
3034 {
3035         struct amdgpu_device *adev = plane->dev->dev_private;
3036         struct dc *dc = adev->dm.dc;
3037         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
3038
3039         if (!dm_plane_state->dc_state)
3040                 return 0;
3041
3042         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
3043                 return 0;
3044
3045         return -EINVAL;
3046 }
3047
/* Plane helper vtable: BO pin/unpin around commits plus DC validation. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
};
3053
3054 /*
3055  * TODO: these are currently initialized to rgb formats only.
3056  * For future use cases we should either initialize them dynamically based on
3057  * plane capabilities, or initialize this array to all formats, so internal drm
3058  * check will succeed, and let DC to implement proper check
3059  */
3060 static const uint32_t rgb_formats[] = {
3061         DRM_FORMAT_RGB888,
3062         DRM_FORMAT_XRGB8888,
3063         DRM_FORMAT_ARGB8888,
3064         DRM_FORMAT_RGBA8888,
3065         DRM_FORMAT_XRGB2101010,
3066         DRM_FORMAT_XBGR2101010,
3067         DRM_FORMAT_ARGB2101010,
3068         DRM_FORMAT_ABGR2101010,
3069 };
3070
3071 static const uint32_t yuv_formats[] = {
3072         DRM_FORMAT_NV12,
3073         DRM_FORMAT_NV21,
3074 };
3075
3076 static const u32 cursor_formats[] = {
3077         DRM_FORMAT_ARGB8888
3078 };
3079
/* Initialize a DM plane with the format list matching its type (primary,
 * overlay or cursor), attach the helper vtable and create its initial
 * atomic state.
 *
 * Returns the drm_universal_plane_init() result, or -EPERM for an
 * unrecognized plane type (no case in the switch ran).
 */
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct amdgpu_plane *aplane,
				unsigned long possible_crtcs)
{
	int res = -EPERM;

	switch (aplane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		aplane->base.format_default = true;

		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				rgb_formats,
				ARRAY_SIZE(rgb_formats),
				NULL, aplane->base.type, NULL);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				yuv_formats,
				ARRAY_SIZE(yuv_formats),
				NULL, aplane->base.type, NULL);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		res = drm_universal_plane_init(
				dm->adev->ddev,
				&aplane->base,
				possible_crtcs,
				&dm_plane_funcs,
				cursor_formats,
				ARRAY_SIZE(cursor_formats),
				NULL, aplane->base.type, NULL);
		break;
	}

	drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (aplane->base.funcs->reset)
		aplane->base.funcs->reset(&aplane->base);


	return res;
}
3130
3131 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
3132                                struct drm_plane *plane,
3133                                uint32_t crtc_index)
3134 {
3135         struct amdgpu_crtc *acrtc = NULL;
3136         struct amdgpu_plane *cursor_plane;
3137
3138         int res = -ENOMEM;
3139
3140         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
3141         if (!cursor_plane)
3142                 goto fail;
3143
3144         cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
3145         res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
3146
3147         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
3148         if (!acrtc)
3149                 goto fail;
3150
3151         res = drm_crtc_init_with_planes(
3152                         dm->ddev,
3153                         &acrtc->base,
3154                         plane,
3155                         &cursor_plane->base,
3156                         &amdgpu_dm_crtc_funcs, NULL);
3157
3158         if (res)
3159                 goto fail;
3160
3161         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
3162
3163         /* Create (reset) the plane state */
3164         if (acrtc->base.funcs->reset)
3165                 acrtc->base.funcs->reset(&acrtc->base);
3166
3167         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
3168         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
3169
3170         acrtc->crtc_id = crtc_index;
3171         acrtc->base.enabled = false;
3172
3173         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
3174         drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
3175
3176         return 0;
3177
3178 fail:
3179         kfree(acrtc);
3180         kfree(cursor_plane);
3181         return res;
3182 }
3183
3184
3185 static int to_drm_connector_type(enum signal_type st)
3186 {
3187         switch (st) {
3188         case SIGNAL_TYPE_HDMI_TYPE_A:
3189                 return DRM_MODE_CONNECTOR_HDMIA;
3190         case SIGNAL_TYPE_EDP:
3191                 return DRM_MODE_CONNECTOR_eDP;
3192         case SIGNAL_TYPE_RGB:
3193                 return DRM_MODE_CONNECTOR_VGA;
3194         case SIGNAL_TYPE_DISPLAY_PORT:
3195         case SIGNAL_TYPE_DISPLAY_PORT_MST:
3196                 return DRM_MODE_CONNECTOR_DisplayPort;
3197         case SIGNAL_TYPE_DVI_DUAL_LINK:
3198         case SIGNAL_TYPE_DVI_SINGLE_LINK:
3199                 return DRM_MODE_CONNECTOR_DVID;
3200         case SIGNAL_TYPE_VIRTUAL:
3201                 return DRM_MODE_CONNECTOR_VIRTUAL;
3202
3203         default:
3204                 return DRM_MODE_CONNECTOR_Unknown;
3205         }
3206 }
3207
3208 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
3209 {
3210         const struct drm_connector_helper_funcs *helper =
3211                 connector->helper_private;
3212         struct drm_encoder *encoder;
3213         struct amdgpu_encoder *amdgpu_encoder;
3214
3215         encoder = helper->best_encoder(connector);
3216
3217         if (encoder == NULL)
3218                 return;
3219
3220         amdgpu_encoder = to_amdgpu_encoder(encoder);
3221
3222         amdgpu_encoder->native_mode.clock = 0;
3223
3224         if (!list_empty(&connector->probed_modes)) {
3225                 struct drm_display_mode *preferred_mode = NULL;
3226
3227                 list_for_each_entry(preferred_mode,
3228                                     &connector->probed_modes,
3229                                     head) {
3230                         if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
3231                                 amdgpu_encoder->native_mode = *preferred_mode;
3232
3233                         break;
3234                 }
3235
3236         }
3237 }
3238
3239 static struct drm_display_mode *
3240 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
3241                              char *name,
3242                              int hdisplay, int vdisplay)
3243 {
3244         struct drm_device *dev = encoder->dev;
3245         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3246         struct drm_display_mode *mode = NULL;
3247         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3248
3249         mode = drm_mode_duplicate(dev, native_mode);
3250
3251         if (mode == NULL)
3252                 return NULL;
3253
3254         mode->hdisplay = hdisplay;
3255         mode->vdisplay = vdisplay;
3256         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
3257         strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
3258
3259         return mode;
3260
3261 }
3262
3263 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
3264                                                  struct drm_connector *connector)
3265 {
3266         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3267         struct drm_display_mode *mode = NULL;
3268         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
3269         struct amdgpu_dm_connector *amdgpu_dm_connector =
3270                                 to_amdgpu_dm_connector(connector);
3271         int i;
3272         int n;
3273         struct mode_size {
3274                 char name[DRM_DISPLAY_MODE_LEN];
3275                 int w;
3276                 int h;
3277         } common_modes[] = {
3278                 {  "640x480",  640,  480},
3279                 {  "800x600",  800,  600},
3280                 { "1024x768", 1024,  768},
3281                 { "1280x720", 1280,  720},
3282                 { "1280x800", 1280,  800},
3283                 {"1280x1024", 1280, 1024},
3284                 { "1440x900", 1440,  900},
3285                 {"1680x1050", 1680, 1050},
3286                 {"1600x1200", 1600, 1200},
3287                 {"1920x1080", 1920, 1080},
3288                 {"1920x1200", 1920, 1200}
3289         };
3290
3291         n = ARRAY_SIZE(common_modes);
3292
3293         for (i = 0; i < n; i++) {
3294                 struct drm_display_mode *curmode = NULL;
3295                 bool mode_existed = false;
3296
3297                 if (common_modes[i].w > native_mode->hdisplay ||
3298                     common_modes[i].h > native_mode->vdisplay ||
3299                    (common_modes[i].w == native_mode->hdisplay &&
3300                     common_modes[i].h == native_mode->vdisplay))
3301                         continue;
3302
3303                 list_for_each_entry(curmode, &connector->probed_modes, head) {
3304                         if (common_modes[i].w == curmode->hdisplay &&
3305                             common_modes[i].h == curmode->vdisplay) {
3306                                 mode_existed = true;
3307                                 break;
3308                         }
3309                 }
3310
3311                 if (mode_existed)
3312                         continue;
3313
3314                 mode = amdgpu_dm_create_common_mode(encoder,
3315                                 common_modes[i].name, common_modes[i].w,
3316                                 common_modes[i].h);
3317                 drm_mode_probed_add(connector, mode);
3318                 amdgpu_dm_connector->num_modes++;
3319         }
3320 }
3321
3322 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
3323                                               struct edid *edid)
3324 {
3325         struct amdgpu_dm_connector *amdgpu_dm_connector =
3326                         to_amdgpu_dm_connector(connector);
3327
3328         if (edid) {
3329                 /* empty probed_modes */
3330                 INIT_LIST_HEAD(&connector->probed_modes);
3331                 amdgpu_dm_connector->num_modes =
3332                                 drm_add_edid_modes(connector, edid);
3333
3334                 drm_edid_to_eld(connector, edid);
3335
3336                 amdgpu_dm_get_native_mode(connector);
3337         } else {
3338                 amdgpu_dm_connector->num_modes = 0;
3339         }
3340 }
3341
3342 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
3343 {
3344         const struct drm_connector_helper_funcs *helper =
3345                         connector->helper_private;
3346         struct amdgpu_dm_connector *amdgpu_dm_connector =
3347                         to_amdgpu_dm_connector(connector);
3348         struct drm_encoder *encoder;
3349         struct edid *edid = amdgpu_dm_connector->edid;
3350
3351         encoder = helper->best_encoder(connector);
3352
3353         amdgpu_dm_connector_ddc_get_modes(connector, edid);
3354         amdgpu_dm_connector_add_common_modes(encoder, connector);
3355         return amdgpu_dm_connector->num_modes;
3356 }
3357
3358 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
3359                                      struct amdgpu_dm_connector *aconnector,
3360                                      int connector_type,
3361                                      struct dc_link *link,
3362                                      int link_index)
3363 {
3364         struct amdgpu_device *adev = dm->ddev->dev_private;
3365
3366         aconnector->connector_id = link_index;
3367         aconnector->dc_link = link;
3368         aconnector->base.interlace_allowed = false;
3369         aconnector->base.doublescan_allowed = false;
3370         aconnector->base.stereo_allowed = false;
3371         aconnector->base.dpms = DRM_MODE_DPMS_OFF;
3372         aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
3373
3374         mutex_init(&aconnector->hpd_lock);
3375
3376         /* configure support HPD hot plug connector_>polled default value is 0
3377          * which means HPD hot plug not supported
3378          */
3379         switch (connector_type) {
3380         case DRM_MODE_CONNECTOR_HDMIA:
3381                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3382                 break;
3383         case DRM_MODE_CONNECTOR_DisplayPort:
3384                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3385                 break;
3386         case DRM_MODE_CONNECTOR_DVID:
3387                 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
3388                 break;
3389         default:
3390                 break;
3391         }
3392
3393         drm_object_attach_property(&aconnector->base.base,
3394                                 dm->ddev->mode_config.scaling_mode_property,
3395                                 DRM_MODE_SCALE_NONE);
3396
3397         drm_object_attach_property(&aconnector->base.base,
3398                                 adev->mode_info.underscan_property,
3399                                 UNDERSCAN_OFF);
3400         drm_object_attach_property(&aconnector->base.base,
3401                                 adev->mode_info.underscan_hborder_property,
3402                                 0);
3403         drm_object_attach_property(&aconnector->base.base,
3404                                 adev->mode_info.underscan_vborder_property,
3405                                 0);
3406
3407 }
3408
3409 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
3410                               struct i2c_msg *msgs, int num)
3411 {
3412         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
3413         struct ddc_service *ddc_service = i2c->ddc_service;
3414         struct i2c_command cmd;
3415         int i;
3416         int result = -EIO;
3417
3418         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
3419
3420         if (!cmd.payloads)
3421                 return result;
3422
3423         cmd.number_of_payloads = num;
3424         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
3425         cmd.speed = 100;
3426
3427         for (i = 0; i < num; i++) {
3428                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
3429                 cmd.payloads[i].address = msgs[i].addr;
3430                 cmd.payloads[i].length = msgs[i].len;
3431                 cmd.payloads[i].data = msgs[i].buf;
3432         }
3433
3434         if (dal_i2caux_submit_i2c_command(
3435                         ddc_service->ctx->i2caux,
3436                         ddc_service->ddc_pin,
3437                         &cmd))
3438                 result = num;
3439
3440         kfree(cmd.payloads);
3441         return result;
3442 }
3443
/* Advertise plain I2C plus emulated SMBus support for the DM i2c bus. */
static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

/* i2c algorithm backing the adapters created by create_i2c(). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
3453
3454 static struct amdgpu_i2c_adapter *
3455 create_i2c(struct ddc_service *ddc_service,
3456            int link_index,
3457            int *res)
3458 {
3459         struct amdgpu_device *adev = ddc_service->ctx->driver_context;
3460         struct amdgpu_i2c_adapter *i2c;
3461
3462         i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
3463         if (!i2c)
3464                 return NULL;
3465         i2c->base.owner = THIS_MODULE;
3466         i2c->base.class = I2C_CLASS_DDC;
3467         i2c->base.dev.parent = &adev->pdev->dev;
3468         i2c->base.algo = &amdgpu_dm_i2c_algo;
3469         snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
3470         i2c_set_adapdata(&i2c->base, i2c);
3471         i2c->ddc_service = ddc_service;
3472
3473         return i2c;
3474 }
3475
3476 /* Note: this function assumes that dc_link_detect() was called for the
3477  * dc_link which will be represented by this aconnector.
3478  */
3479 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
3480                                     struct amdgpu_dm_connector *aconnector,
3481                                     uint32_t link_index,
3482                                     struct amdgpu_encoder *aencoder)
3483 {
3484         int res = 0;
3485         int connector_type;
3486         struct dc *dc = dm->dc;
3487         struct dc_link *link = dc_get_link_at_index(dc, link_index);
3488         struct amdgpu_i2c_adapter *i2c;
3489
3490         link->priv = aconnector;
3491
3492         DRM_DEBUG_DRIVER("%s()\n", __func__);
3493
3494         i2c = create_i2c(link->ddc, link->link_index, &res);
3495         if (!i2c) {
3496                 DRM_ERROR("Failed to create i2c adapter data\n");
3497                 return -ENOMEM;
3498         }
3499
3500         aconnector->i2c = i2c;
3501         res = i2c_add_adapter(&i2c->base);
3502
3503         if (res) {
3504                 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
3505                 goto out_free;
3506         }
3507
3508         connector_type = to_drm_connector_type(link->connector_signal);
3509
3510         res = drm_connector_init(
3511                         dm->ddev,
3512                         &aconnector->base,
3513                         &amdgpu_dm_connector_funcs,
3514                         connector_type);
3515
3516         if (res) {
3517                 DRM_ERROR("connector_init failed\n");
3518                 aconnector->connector_id = -1;
3519                 goto out_free;
3520         }
3521
3522         drm_connector_helper_add(
3523                         &aconnector->base,
3524                         &amdgpu_dm_connector_helper_funcs);
3525
3526         if (aconnector->base.funcs->reset)
3527                 aconnector->base.funcs->reset(&aconnector->base);
3528
3529         amdgpu_dm_connector_init_helper(
3530                 dm,
3531                 aconnector,
3532                 connector_type,
3533                 link,
3534                 link_index);
3535
3536         drm_mode_connector_attach_encoder(
3537                 &aconnector->base, &aencoder->base);
3538
3539         drm_connector_register(&aconnector->base);
3540
3541         if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
3542                 || connector_type == DRM_MODE_CONNECTOR_eDP)
3543                 amdgpu_dm_initialize_dp_connector(dm, aconnector);
3544
3545 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3546         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3547
3548         /* NOTE: this currently will create backlight device even if a panel
3549          * is not connected to the eDP/LVDS connector.
3550          *
3551          * This is less than ideal but we don't have sink information at this
3552          * stage since detection happens after. We can't do detection earlier
3553          * since MST detection needs connectors to be created first.
3554          */
3555         if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
3556                 /* Event if registration failed, we should continue with
3557                  * DM initialization because not having a backlight control
3558                  * is better then a black screen.
3559                  */
3560                 amdgpu_dm_register_backlight_device(dm);
3561
3562                 if (dm->backlight_dev)
3563                         dm->backlight_link = link;
3564         }
3565 #endif
3566
3567 out_free:
3568         if (res) {
3569                 kfree(i2c);
3570                 aconnector->i2c = NULL;
3571         }
3572         return res;
3573 }
3574
3575 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
3576 {
3577         switch (adev->mode_info.num_crtc) {
3578         case 1:
3579                 return 0x1;
3580         case 2:
3581                 return 0x3;
3582         case 3:
3583                 return 0x7;
3584         case 4:
3585                 return 0xf;
3586         case 5:
3587                 return 0x1f;
3588         case 6:
3589         default:
3590                 return 0x3f;
3591         }
3592 }
3593
3594 static int amdgpu_dm_encoder_init(struct drm_device *dev,
3595                                   struct amdgpu_encoder *aencoder,
3596                                   uint32_t link_index)
3597 {
3598         struct amdgpu_device *adev = dev->dev_private;
3599
3600         int res = drm_encoder_init(dev,
3601                                    &aencoder->base,
3602                                    &amdgpu_dm_encoder_funcs,
3603                                    DRM_MODE_ENCODER_TMDS,
3604                                    NULL);
3605
3606         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
3607
3608         if (!res)
3609                 aencoder->encoder_id = link_index;
3610         else
3611                 aencoder->encoder_id = -1;
3612
3613         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
3614
3615         return res;
3616 }
3617
3618 static void manage_dm_interrupts(struct amdgpu_device *adev,
3619                                  struct amdgpu_crtc *acrtc,
3620                                  bool enable)
3621 {
3622         /*
3623          * this is not correct translation but will work as soon as VBLANK
3624          * constant is the same as PFLIP
3625          */
3626         int irq_type =
3627                 amdgpu_crtc_idx_to_irq_type(
3628                         adev,
3629                         acrtc->crtc_id);
3630
3631         if (enable) {
3632                 drm_crtc_vblank_on(&acrtc->base);
3633                 amdgpu_irq_get(
3634                         adev,
3635                         &adev->pageflip_irq,
3636                         irq_type);
3637         } else {
3638
3639                 amdgpu_irq_put(
3640                         adev,
3641                         &adev->pageflip_irq,
3642                         irq_type);
3643                 drm_crtc_vblank_off(&acrtc->base);
3644         }
3645 }
3646
/*
 * Return true when the scaling/underscan configuration changed between
 * the old and new DM connector states (used to decide whether a stream
 * update is needed).
 *
 * NOTE(review): the underscan enable/disable branches only report a
 * change when BOTH hborder and vborder are non-zero (&&). If a change
 * in a single border should also count, these would be || — confirm
 * the intended semantics before changing.
 */
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	/* Underscan switched off: relevant only if borders were in use. */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	/* Underscan switched on: relevant only if borders are now set. */
	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	/* Underscan state unchanged: compare the border values directly. */
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
3664
/*
 * Detach a stream from its CRTC: drop it from the freesync module (when
 * present) and mark the CRTC as disabled with no owning OTG.
 */
static void remove_stream(struct amdgpu_device *adev,
			  struct amdgpu_crtc *acrtc,
			  struct dc_stream_state *stream)
{
	/* this is the update mode case */
	if (adev->dm.freesync_module)
		mod_freesync_remove_stream(adev->dm.freesync_module, stream);

	/* -1 = no output timing generator currently assigned. */
	acrtc->otg_inst = -1;
	acrtc->enabled = false;
}
3676
3677 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
3678                                struct dc_cursor_position *position)
3679 {
3680         struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc);
3681         int x, y;
3682         int xorigin = 0, yorigin = 0;
3683
3684         if (!crtc || !plane->state->fb) {
3685                 position->enable = false;
3686                 position->x = 0;
3687                 position->y = 0;
3688                 return 0;
3689         }
3690
3691         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
3692             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
3693                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
3694                           __func__,
3695                           plane->state->crtc_w,
3696                           plane->state->crtc_h);
3697                 return -EINVAL;
3698         }
3699
3700         x = plane->state->crtc_x;
3701         y = plane->state->crtc_y;
3702         /* avivo cursor are offset into the total surface */
3703         x += crtc->primary->state->src_x >> 16;
3704         y += crtc->primary->state->src_y >> 16;
3705         if (x < 0) {
3706                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
3707                 x = 0;
3708         }
3709         if (y < 0) {
3710                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
3711                 y = 0;
3712         }
3713         position->enable = true;
3714         position->x = x;
3715         position->y = y;
3716         position->x_hotspot = xorigin;
3717         position->y_hotspot = yorigin;
3718
3719         return 0;
3720 }
3721
/*
 * Program the hardware cursor for a cursor-plane update: compute the
 * position via get_cursor_position() and push position + attributes to
 * DC for the CRTC's stream. A disabled position (no CRTC / no FB) turns
 * the cursor off.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* New FB present -> use the new CRTC; otherwise fall back to the old one. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	/* NOTE(review): computed even when crtc is NULL; only dereferenced
	 * below after position.enable filtering — confirm crtc cannot be
	 * NULL when execution reaches the DRM_DEBUG_DRIVER/crtc_state uses. */
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do when neither the old nor the new state has an FB. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream)
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	/* Cursor surface address and geometry handed to DC. */
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	attributes.pitch = attributes.width;

	/* NOTE(review): crtc_state is dereferenced here without a NULL
	 * check, unlike the turn-off path above — verify crtc is always
	 * non-NULL when position.enable is true. */
	if (crtc_state->stream) {
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
	}
}
3778
/*
 * Hand the CRTC's pending pageflip event over to the flip ISR.
 *
 * Caller must hold dev->event_lock (asserted below). The event pointer
 * is moved from the CRTC state into acrtc->event and the flip is marked
 * SUBMITTED so the pageflip interrupt handler will complete it.
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	/* A previous flip must have been completed before submitting a new one. */
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
3796
/*
 * Executes flip
 *
 * Waits on all BO's fences and for proper vblank count, then submits
 * the new framebuffer address to DC under the event lock.
 *
 * @crtc:   CRTC being flipped
 * @fb:     framebuffer to flip to
 * @target: absolute vblank count to flip at
 * @state:  DC state to commit the surface update against
 */
static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
			      struct drm_framebuffer *fb,
			      uint32_t target,
			      struct dc_state *state)
{
	unsigned long flags;
	uint32_t target_vblank;
	int r, vpos, hpos;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
	struct dc_flip_addrs addr = { {0} };
	/* TODO eliminate or rename surface_update */
	struct dc_surface_update surface_updates[1] = { {0} };
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);


	/* Prepare wait for target vblank early - before the fence-waits.
	 * Converts the DRM software vblank count into the hardware counter
	 * domain so it can be compared against the live counter below. */
	target_vblank = target - drm_crtc_vblank_count(crtc) +
			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);

	/* TODO This might fail and hence better not used, wait
	 * explicitly on fences instead
	 * and in general should be called for
	 * blocking commit to as per framework helpers
	 */
	r = amdgpu_bo_reserve(abo, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve buffer before flip\n");
		WARN_ON(1);
	}

	/* Wait for all fences on this FB */
	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
								    MAX_SCHEDULE_TIMEOUT) < 0);

	amdgpu_bo_unreserve(abo);

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	while ((acrtc->enabled &&
		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
		(int)(target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
		usleep_range(1000, 1100);
	}

	/* Flip: everything below runs under the event lock so the flip ISR
	 * sees a consistent pflip state/event pair. */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	/* update crtc fb */
	crtc->primary->fb = fb;

	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
	WARN_ON(!acrtc_state->stream);

	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
	addr.flip_immediate = async_flip;


	/* Hand a pending pageflip event over to the flip ISR. */
	if (acrtc->base.state->event)
		prepare_flip_isr(acrtc);

	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
	surface_updates->flip_addr = &addr;


	dc_commit_updates_for_stream(adev->dm.dc,
					     surface_updates,
					     1,
					     acrtc_state->stream,
					     NULL,
					     &surface_updates->surface,
					     state);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);


	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
3892
/*
 * Commit all plane updates belonging to one CRTC (pcrtc).
 *
 * Cursor planes are handled immediately via handle_cursor_update().
 * For modeset-style commits (allow_modeset) the plane states are
 * collected and pushed to DC in one dc_commit_planes_to_stream() call;
 * otherwise each changed plane is flipped via amdgpu_dm_do_flip().
 * *wait_for_vblank is cleared when an async (immediate) flip is seen.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
{
	uint32_t i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dc_stream_state *dc_stream_attach;
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	int planes_count = 0;
	unsigned long flags;

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		bool pflip_needed;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			handle_cursor_update(plane, old_plane_state);
			continue;
		}

		/* Only planes attached to this CRTC are handled here. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		/* Non-modeset commits take the pageflip path. */
		pflip_needed = !state->allow_modeset;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
			DRM_ERROR("%s: acrtc %d, already busy\n",
				  __func__,
				  acrtc_attach->crtc_id);
			/* In commit tail framework this cannot happen */
			WARN_ON(1);
		}
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		if (!pflip_needed) {
			/* Modeset path: batch the DC plane state for one
			 * commit after the loop. */
			WARN_ON(!dm_new_plane_state->dc_state);

			plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;

			dc_stream_attach = acrtc_state->stream;
			planes_count++;

		} else if (new_crtc_state->planes_changed) {
			/* Assume even ONE crtc with immediate flip means
			 * entire can't wait for VBLANK
			 * TODO Check if it's correct
			 */
			*wait_for_vblank =
					new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
				false : true;

			/* TODO: Needs rework for multiplane flip */
			if (plane->type == DRM_PLANE_TYPE_PRIMARY)
				drm_crtc_vblank_get(crtc);

			amdgpu_dm_do_flip(
				crtc,
				fb,
				drm_crtc_vblank_count(crtc) + *wait_for_vblank,
				dm_state->context);
		}

	}

	if (planes_count) {
		unsigned long flags;

		/* Arm the flip ISR for the pending event before committing. */
		if (new_pcrtc_state->event) {

			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			prepare_flip_isr(acrtc_attach);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (false == dc_commit_planes_to_stream(dm->dc,
							plane_states_constructed,
							planes_count,
							dc_stream_attach,
							dm_state->context))
			dm_error("%s: Failed to attach plane!\n", __func__);
	} else {
		/*TODO BUG Here should go disable planes on CRTC. */
	}
}
3996
3997
3998 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
3999                                    struct drm_atomic_state *state,
4000                                    bool nonblock)
4001 {
4002         struct drm_crtc *crtc;
4003         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4004         struct amdgpu_device *adev = dev->dev_private;
4005         int i;
4006
4007         /*
4008          * We evade vblanks and pflips on crtc that
4009          * should be changed. We do it here to flush & disable
4010          * interrupts before drm_swap_state is called in drm_atomic_helper_commit
4011          * it will update crtc->dm_crtc_state->stream pointer which is used in
4012          * the ISRs.
4013          */
4014         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4015                 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4016                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4017
4018                 if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
4019                         manage_dm_interrupts(adev, acrtc, false);
4020         }
4021         /* Add check here for SoC's that support hardware cursor plane, to
4022          * unset legacy_cursor_update */
4023
4024         return drm_atomic_helper_commit(dev, state, nonblock);
4025
4026         /*TODO Handle EINTR, reenable IRQ*/
4027 }
4028
4029 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
4030 {
4031         struct drm_device *dev = state->dev;
4032         struct amdgpu_device *adev = dev->dev_private;
4033         struct amdgpu_display_manager *dm = &adev->dm;
4034         struct dm_atomic_state *dm_state;
4035         uint32_t i, j;
4036         uint32_t new_crtcs_count = 0;
4037         struct drm_crtc *crtc;
4038         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4039         struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
4040         struct dc_stream_state *new_stream = NULL;
4041         unsigned long flags;
4042         bool wait_for_vblank = true;
4043         struct drm_connector *connector;
4044         struct drm_connector_state *old_con_state, *new_con_state;
4045         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
4046
4047         drm_atomic_helper_update_legacy_modeset_state(dev, state);
4048
4049         dm_state = to_dm_atomic_state(state);
4050
4051         /* update changed items */
4052         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4053                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4054
4055                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4056                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4057
4058                 DRM_DEBUG_DRIVER(
4059                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
4060                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
4061                         "connectors_changed:%d\n",
4062                         acrtc->crtc_id,
4063                         new_crtc_state->enable,
4064                         new_crtc_state->active,
4065                         new_crtc_state->planes_changed,
4066                         new_crtc_state->mode_changed,
4067                         new_crtc_state->active_changed,
4068                         new_crtc_state->connectors_changed);
4069
4070                 /* handles headless hotplug case, updating new_state and
4071                  * aconnector as needed
4072                  */
4073
4074                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
4075
4076                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
4077
4078                         if (!dm_new_crtc_state->stream) {
4079                                 /*
4080                                  * this could happen because of issues with
4081                                  * userspace notifications delivery.
4082                                  * In this case userspace tries to set mode on
4083                                  * display which is disconnect in fact.
4084                                  * dc_sink in NULL in this case on aconnector.
4085                                  * We expect reset mode will come soon.
4086                                  *
4087                                  * This can also happen when unplug is done
4088                                  * during resume sequence ended
4089                                  *
4090                                  * In this case, we want to pretend we still
4091                                  * have a sink to keep the pipe running so that
4092                                  * hw state is consistent with the sw state
4093                                  */
4094                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
4095                                                 __func__, acrtc->base.base.id);
4096                                 continue;
4097                         }
4098
4099
4100                         if (dm_old_crtc_state->stream)
4101                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4102
4103
4104                         /*
4105                          * this loop saves set mode crtcs
4106                          * we needed to enable vblanks once all
4107                          * resources acquired in dc after dc_commit_streams
4108                          */
4109
4110                         /*TODO move all this into dm_crtc_state, get rid of
4111                          * new_crtcs array and use old and new atomic states
4112                          * instead
4113                          */
4114                         new_crtcs[new_crtcs_count] = acrtc;
4115                         new_crtcs_count++;
4116
4117                         new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
4118                         acrtc->enabled = true;
4119                         acrtc->hw_mode = new_crtc_state->mode;
4120                         crtc->hwmode = new_crtc_state->mode;
4121                 } else if (modereset_required(new_crtc_state)) {
4122                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
4123
4124                         /* i.e. reset mode */
4125                         if (dm_old_crtc_state->stream)
4126                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
4127                 }
4128         } /* for_each_crtc_in_state() */
4129
4130         /*
4131          * Add streams after required streams from new and replaced streams
4132          * are removed from freesync module
4133          */
4134         if (adev->dm.freesync_module) {
4135                 for (i = 0; i < new_crtcs_count; i++) {
4136                         struct amdgpu_dm_connector *aconnector = NULL;
4137
4138                         new_crtc_state = drm_atomic_get_new_crtc_state(state,
4139                                         &new_crtcs[i]->base);
4140                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4141
4142                         new_stream = dm_new_crtc_state->stream;
4143                         aconnector = amdgpu_dm_find_first_crtc_matching_connector(
4144                                         state,
4145                                         &new_crtcs[i]->base);
4146                         if (!aconnector) {
4147                                 DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
4148                                          "skipping freesync init\n",
4149                                          new_crtcs[i]->crtc_id);
4150                                 continue;
4151                         }
4152
4153                         mod_freesync_add_stream(adev->dm.freesync_module,
4154                                                 new_stream, &aconnector->caps);
4155                 }
4156         }
4157
4158         if (dm_state->context)
4159                 WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
4160
4161         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4162                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4163
4164                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4165
4166                 if (dm_new_crtc_state->stream != NULL) {
4167                         const struct dc_stream_status *status =
4168                                         dc_stream_get_status(dm_new_crtc_state->stream);
4169
4170                         if (!status)
4171                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
4172                         else
4173                                 acrtc->otg_inst = status->primary_otg_inst;
4174                 }
4175         }
4176
4177         /* Handle scaling and underscan changes*/
4178         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4179                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4180                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4181                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4182                 struct dc_stream_status *status = NULL;
4183
4184                 if (acrtc)
4185                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4186
4187                 /* Skip any modesets/resets */
4188                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
4189                         continue;
4190
4191                 /* Skip any thing not scale or underscan changes */
4192                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4193                         continue;
4194
4195                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4196
4197                 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
4198                                 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
4199
4200                 status = dc_stream_get_status(dm_new_crtc_state->stream);
4201                 WARN_ON(!status);
4202                 WARN_ON(!status->plane_count);
4203
4204                 if (!dm_new_crtc_state->stream)
4205                         continue;
4206
4207                 /*TODO How it works with MPO ?*/
4208                 if (!dc_commit_planes_to_stream(
4209                                 dm->dc,
4210                                 status->plane_states,
4211                                 status->plane_count,
4212                                 dm_new_crtc_state->stream,
4213                                 dm_state->context))
4214                         dm_error("%s: Failed to update stream scaling!\n", __func__);
4215         }
4216
4217         for (i = 0; i < new_crtcs_count; i++) {
4218                 /*
4219                  * loop to enable interrupts on newly arrived crtc
4220                  */
4221                 struct amdgpu_crtc *acrtc = new_crtcs[i];
4222
4223                 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
4224                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4225
4226                 if (adev->dm.freesync_module)
4227                         mod_freesync_notify_mode_change(
4228                                 adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
4229
4230                 manage_dm_interrupts(adev, acrtc, true);
4231         }
4232
4233         /* update planes when needed per crtc*/
4234         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
4235                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4236
4237                 if (dm_new_crtc_state->stream)
4238                         amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
4239         }
4240
4241
4242         /*
4243          * send vblank event on all events not handled in flip and
4244          * mark consumed event for drm_atomic_helper_commit_hw_done
4245          */
4246         spin_lock_irqsave(&adev->ddev->event_lock, flags);
4247         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4248
4249                 if (new_crtc_state->event)
4250                         drm_send_event_locked(dev, &new_crtc_state->event->base);
4251
4252                 new_crtc_state->event = NULL;
4253         }
4254         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
4255
4256         /* Signal HW programming completion */
4257         drm_atomic_helper_commit_hw_done(state);
4258
4259         if (wait_for_vblank)
4260                 drm_atomic_helper_wait_for_vblanks(dev, state);
4261
4262         drm_atomic_helper_cleanup_planes(dev, state);
4263 }
4264
4265
4266 static int dm_force_atomic_commit(struct drm_connector *connector)
4267 {
4268         int ret = 0;
4269         struct drm_device *ddev = connector->dev;
4270         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
4271         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4272         struct drm_plane *plane = disconnected_acrtc->base.primary;
4273         struct drm_connector_state *conn_state;
4274         struct drm_crtc_state *crtc_state;
4275         struct drm_plane_state *plane_state;
4276
4277         if (!state)
4278                 return -ENOMEM;
4279
4280         state->acquire_ctx = ddev->mode_config.acquire_ctx;
4281
4282         /* Construct an atomic state to restore previous display setting */
4283
4284         /*
4285          * Attach connectors to drm_atomic_state
4286          */
4287         conn_state = drm_atomic_get_connector_state(state, connector);
4288
4289         ret = PTR_ERR_OR_ZERO(conn_state);
4290         if (ret)
4291                 goto err;
4292
4293         /* Attach crtc to drm_atomic_state*/
4294         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
4295
4296         ret = PTR_ERR_OR_ZERO(crtc_state);
4297         if (ret)
4298                 goto err;
4299
4300         /* force a restore */
4301         crtc_state->mode_changed = true;
4302
4303         /* Attach plane to drm_atomic_state */
4304         plane_state = drm_atomic_get_plane_state(state, plane);
4305
4306         ret = PTR_ERR_OR_ZERO(plane_state);
4307         if (ret)
4308                 goto err;
4309
4310
4311         /* Call commit internally with the state we just constructed */
4312         ret = drm_atomic_commit(state);
4313         if (!ret)
4314                 return 0;
4315
4316 err:
4317         DRM_ERROR("Restoring old state failed with %i\n", ret);
4318         drm_atomic_state_put(state);
4319
4320         return ret;
4321 }
4322
/*
 * This function handles all cases when set mode does not come upon hotplug.
 * This includes when the same display is unplugged and then plugged back into
 * the same port, and when we are running without usermode desktop manager
 * support.
 */
4328 void dm_restore_drm_connector_state(struct drm_device *dev,
4329                                     struct drm_connector *connector)
4330 {
4331         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4332         struct amdgpu_crtc *disconnected_acrtc;
4333         struct dm_crtc_state *acrtc_state;
4334
4335         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
4336                 return;
4337
4338         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
4339         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
4340
4341         if (!disconnected_acrtc || !acrtc_state->stream)
4342                 return;
4343
4344         /*
4345          * If the previous sink is not released and different from the current,
4346          * we deduce we are in a state where we can not rely on usermode call
4347          * to turn on the display, so we do it here
4348          */
4349         if (acrtc_state->stream->sink != aconnector->dc_sink)
4350                 dm_force_atomic_commit(&aconnector->base);
4351 }
4352
/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
                                 struct drm_atomic_state *state)
{
        struct drm_crtc *crtc;
        struct drm_crtc_commit *commit;
        long ret;

        /* Add all modeset locks to state->acquire_ctx; the DRM framework
         * releases everything held by the context at the end of the
         * commit/check, so these extra locks get dropped for us too.
         */
        ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                return ret;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                /* Peek at the head commit on this CRTC under commit_lock and
                 * take a reference so it cannot be freed while we wait on it.
                 */
                spin_lock(&crtc->commit_lock);
                commit = list_first_entry_or_null(&crtc->commit_list,
                                struct drm_crtc_commit, commit_entry);
                if (commit)
                        drm_crtc_commit_get(commit);
                spin_unlock(&crtc->commit_lock);

                if (!commit)
                        continue;

                /* Make sure all pending HW programming completed and
                 * page flips done
                 */
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

                if (ret > 0)
                        ret = wait_for_completion_interruptible_timeout(
                                        &commit->flip_done, 10*HZ);

                /* ret == 0: the 10s timeout expired; ret < 0: a signal
                 * interrupted the wait (propagated below).
                 */
                if (ret == 0)
                        DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
                                  "timed out\n", crtc->base.id, crtc->name);

                drm_crtc_commit_put(commit);
        }

        /* Negative = interrupted; timeout (0) is only logged, not fatal. */
        return ret < 0 ? ret : 0;
}
4401
/* Walk all CRTCs in @state and synchronize the DC validation context with
 * the requested changes. Called twice from atomic_check: once with
 * @enable == false to remove streams for changed/disabled CRTCs, then with
 * @enable == true to add streams for updated/enabled CRTCs.
 * Sets *lock_and_validation_needed when a stream was added or removed, so
 * the caller knows a full global validation (and lock) is required.
 * Returns 0 on success or a negative errno.
 */
static int dm_update_crtcs_state(struct dc *dc,
                                 struct drm_atomic_state *state,
                                 bool enable,
                                 bool *lock_and_validation_needed)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
        struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
        struct dc_stream_state *new_stream;
        int ret = 0;

        /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
        /* update changed items */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = NULL;
                struct amdgpu_dm_connector *aconnector = NULL;
                struct drm_connector_state *new_con_state = NULL;
                struct dm_connector_state *dm_conn_state = NULL;

                new_stream = NULL;

                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                acrtc = to_amdgpu_crtc(crtc);

                aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

                /* TODO This hack should go away */
                if (aconnector && enable) {
                        // Make sure fake sink is created in plug-in scenario
                        new_con_state = drm_atomic_get_connector_state(state,
                                                                    &aconnector->base);

                        if (IS_ERR(new_con_state)) {
                                ret = PTR_ERR_OR_ZERO(new_con_state);
                                break;
                        }

                        dm_conn_state = to_dm_connector_state(new_con_state);

                        /* Build a candidate stream for this CRTC+connector;
                         * released at next_crtc (or on fail) since
                         * dm_new_crtc_state takes its own reference below.
                         */
                        new_stream = create_stream_for_sink(aconnector,
                                                             &new_crtc_state->mode,
                                                            dm_conn_state);

                        /*
                         * we can have no stream on ACTION_SET if a display
                         * was disconnected during S3; in this case it is not
                         * an error, the OS will be updated after detection,
                         * and will do the right thing on next atomic commit
                         */

                        if (!new_stream) {
                                DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
                                                __func__, acrtc->base.base.id);
                                break;
                        }
                }

                /* If timing and scaling are unchanged, clear mode_changed so
                 * the DRM core treats this as a fast update (no modeset).
                 */
                if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
                                dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {

                        new_crtc_state->mode_changed = false;

                        DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
                                         new_crtc_state->mode_changed);
                }


                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        goto next_crtc;

                DRM_DEBUG_DRIVER(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
                        acrtc->crtc_id,
                        new_crtc_state->enable,
                        new_crtc_state->active,
                        new_crtc_state->planes_changed,
                        new_crtc_state->mode_changed,
                        new_crtc_state->active_changed,
                        new_crtc_state->connectors_changed);

                /* Remove stream for any changed/disabled CRTC */
                if (!enable) {

                        if (!dm_old_crtc_state->stream)
                                goto next_crtc;

                        DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
                                        crtc->base.id);

                        /* i.e. reset mode */
                        if (dc_remove_stream_from_ctx(
                                        dc,
                                        dm_state->context,
                                        dm_old_crtc_state->stream) != DC_OK) {
                                ret = -EINVAL;
                                goto fail;
                        }

                        /* Drop the reference held by the (old) CRTC state. */
                        dc_stream_release(dm_old_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;

                        *lock_and_validation_needed = true;

                } else {/* Add stream for any updated/enabled CRTC */
                        /*
                         * Quick fix to prevent NULL pointer on new_stream when
                         * added MST connectors not found in existing crtc_state in the chained mode
                         * TODO: need to dig out the root cause of that
                         */
                        if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
                                goto next_crtc;

                        if (modereset_required(new_crtc_state))
                                goto next_crtc;

                        if (modeset_required(new_crtc_state, new_stream,
                                             dm_old_crtc_state->stream)) {

                                WARN_ON(dm_new_crtc_state->stream);

                                /* CRTC state takes its own reference on the
                                 * stream, on top of create_stream_for_sink's
                                 * (which is dropped at next_crtc).
                                 */
                                dm_new_crtc_state->stream = new_stream;
                                dc_stream_retain(new_stream);

                                DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
                                                        crtc->base.id);

                                if (dc_add_stream_to_ctx(
                                                dc,
                                                dm_state->context,
                                                dm_new_crtc_state->stream) != DC_OK) {
                                        ret = -EINVAL;
                                        goto fail;
                                }

                                *lock_and_validation_needed = true;
                        }
                }

next_crtc:
                /* Release extra reference */
                if (new_stream)
                         dc_stream_release(new_stream);
        }

        return ret;

fail:
        if (new_stream)
                dc_stream_release(new_stream);
        return ret;
}
4558
4559 static int dm_update_planes_state(struct dc *dc,
4560                                   struct drm_atomic_state *state,
4561                                   bool enable,
4562                                   bool *lock_and_validation_needed)
4563 {
4564         struct drm_crtc *new_plane_crtc, *old_plane_crtc;
4565         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4566         struct drm_plane *plane;
4567         struct drm_plane_state *old_plane_state, *new_plane_state;
4568         struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
4569         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4570         struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
4571         int i ;
4572         /* TODO return page_flip_needed() function */
4573         bool pflip_needed  = !state->allow_modeset;
4574         int ret = 0;
4575
4576         if (pflip_needed)
4577                 return ret;
4578
4579         /* Add new planes */
4580         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
4581                 new_plane_crtc = new_plane_state->crtc;
4582                 old_plane_crtc = old_plane_state->crtc;
4583                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
4584                 dm_old_plane_state = to_dm_plane_state(old_plane_state);
4585
4586                 /*TODO Implement atomic check for cursor plane */
4587                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
4588                         continue;
4589
4590                 /* Remove any changed/removed planes */
4591                 if (!enable) {
4592
4593                         if (!old_plane_crtc)
4594                                 continue;
4595
4596                         old_crtc_state = drm_atomic_get_old_crtc_state(
4597                                         state, old_plane_crtc);
4598                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
4599
4600                         if (!dm_old_crtc_state->stream)
4601                                 continue;
4602
4603                         DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
4604                                         plane->base.id, old_plane_crtc->base.id);
4605
4606                         if (!dc_remove_plane_from_context(
4607                                         dc,
4608                                         dm_old_crtc_state->stream,
4609                                         dm_old_plane_state->dc_state,
4610                                         dm_state->context)) {
4611
4612                                 ret = EINVAL;
4613                                 return ret;
4614                         }
4615
4616
4617                         dc_plane_state_release(dm_old_plane_state->dc_state);
4618                         dm_new_plane_state->dc_state = NULL;
4619
4620                         *lock_and_validation_needed = true;
4621
4622                 } else { /* Add new planes */
4623
4624                         if (drm_atomic_plane_disabling(plane->state, new_plane_state))
4625                                 continue;
4626
4627                         if (!new_plane_crtc)
4628                                 continue;
4629
4630                         new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
4631                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
4632
4633                         if (!dm_new_crtc_state->stream)
4634                                 continue;
4635
4636
4637                         WARN_ON(dm_new_plane_state->dc_state);
4638
4639                         dm_new_plane_state->dc_state = dc_create_plane_state(dc);
4640
4641                         DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
4642                                         plane->base.id, new_plane_crtc->base.id);
4643
4644                         if (!dm_new_plane_state->dc_state) {
4645                                 ret = -EINVAL;
4646                                 return ret;
4647                         }
4648
4649                         ret = fill_plane_attributes(
4650                                 new_plane_crtc->dev->dev_private,
4651                                 dm_new_plane_state->dc_state,
4652                                 new_plane_state,
4653                                 new_crtc_state,
4654                                 false);
4655                         if (ret)
4656                                 return ret;
4657
4658
4659                         if (!dc_add_plane_to_context(
4660                                         dc,
4661                                         dm_new_crtc_state->stream,
4662                                         dm_new_plane_state->dc_state,
4663                                         dm_state->context)) {
4664
4665                                 ret = -EINVAL;
4666                                 return ret;
4667                         }
4668
4669                         *lock_and_validation_needed = true;
4670                 }
4671         }
4672
4673
4674         return ret;
4675 }
4676
4677 static int amdgpu_dm_atomic_check(struct drm_device *dev,
4678                                   struct drm_atomic_state *state)
4679 {
4680         int i;
4681         int ret;
4682         struct amdgpu_device *adev = dev->dev_private;
4683         struct dc *dc = adev->dm.dc;
4684         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
4685         struct drm_connector *connector;
4686         struct drm_connector_state *old_con_state, *new_con_state;
4687         struct drm_crtc *crtc;
4688         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4689
4690         /*
4691          * This bool will be set for true for any modeset/reset
4692          * or plane update which implies non fast surface update.
4693          */
4694         bool lock_and_validation_needed = false;
4695
4696         ret = drm_atomic_helper_check_modeset(dev, state);
4697         if (ret)
4698                 goto fail;
4699
4700         /*
4701          * legacy_cursor_update should be made false for SoC's having
4702          * a dedicated hardware plane for cursor in amdgpu_dm_atomic_commit(),
4703          * otherwise for software cursor plane,
4704          * we should not add it to list of affected planes.
4705          */
4706         if (state->legacy_cursor_update) {
4707                 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4708                         if (new_crtc_state->color_mgmt_changed) {
4709                                 ret = drm_atomic_add_affected_planes(state, crtc);
4710                                 if (ret)
4711                                         goto fail;
4712                         }
4713                 }
4714         } else {
4715                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4716                         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
4717                                 continue;
4718
4719                         if (!new_crtc_state->enable)
4720                                 continue;
4721
4722                         ret = drm_atomic_add_affected_connectors(state, crtc);
4723                         if (ret)
4724                                 return ret;
4725
4726                         ret = drm_atomic_add_affected_planes(state, crtc);
4727                         if (ret)
4728                                 goto fail;
4729                 }
4730         }
4731
4732         dm_state->context = dc_create_state();
4733         ASSERT(dm_state->context);
4734         dc_resource_state_copy_construct_current(dc, dm_state->context);
4735
4736         /* Remove exiting planes if they are modified */
4737         ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
4738         if (ret) {
4739                 goto fail;
4740         }
4741
4742         /* Disable all crtcs which require disable */
4743         ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
4744         if (ret) {
4745                 goto fail;
4746         }
4747
4748         /* Enable all crtcs which require enable */
4749         ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
4750         if (ret) {
4751                 goto fail;
4752         }
4753
4754         /* Add new/modified planes */
4755         ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
4756         if (ret) {
4757                 goto fail;
4758         }
4759
4760         /* Run this here since we want to validate the streams we created */
4761         ret = drm_atomic_helper_check_planes(dev, state);
4762         if (ret)
4763                 goto fail;
4764
4765         /* Check scaling and underscan changes*/
4766         /*TODO Removed scaling changes validation due to inability to commit
4767          * new stream into context w\o causing full reset. Need to
4768          * decide how to handle.
4769          */
4770         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
4771                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
4772                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
4773                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
4774
4775                 /* Skip any modesets/resets */
4776                 if (!acrtc || drm_atomic_crtc_needs_modeset(
4777                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
4778                         continue;
4779
4780                 /* Skip any thing not scale or underscan changes */
4781                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
4782                         continue;
4783
4784                 lock_and_validation_needed = true;
4785         }
4786
4787         /*
4788          * For full updates case when
4789          * removing/adding/updating  streams on once CRTC while flipping
4790          * on another CRTC,
4791          * acquiring global lock  will guarantee that any such full
4792          * update commit
4793          * will wait for completion of any outstanding flip using DRMs
4794          * synchronization events.
4795          */
4796
4797         if (lock_and_validation_needed) {
4798
4799                 ret = do_aquire_global_lock(dev, state);
4800                 if (ret)
4801                         goto fail;
4802
4803                 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
4804                         ret = -EINVAL;
4805                         goto fail;
4806                 }
4807         }
4808
4809         /* Must be success */
4810         WARN_ON(ret);
4811         return ret;
4812
4813 fail:
4814         if (ret == -EDEADLK)
4815                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
4816         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
4817                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
4818         else
4819                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
4820
4821         return ret;
4822 }
4823
4824 static bool is_dp_capable_without_timing_msa(struct dc *dc,
4825                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
4826 {
4827         uint8_t dpcd_data;
4828         bool capable = false;
4829
4830         if (amdgpu_dm_connector->dc_link &&
4831                 dm_helpers_dp_read_dpcd(
4832                                 NULL,
4833                                 amdgpu_dm_connector->dc_link,
4834                                 DP_DOWN_STREAM_PORT_COUNT,
4835                                 &dpcd_data,
4836                                 sizeof(dpcd_data))) {
4837                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
4838         }
4839
4840         return capable;
4841 }
4842 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
4843                                            struct edid *edid)
4844 {
4845         int i;
4846         uint64_t val_capable;
4847         bool edid_check_required;
4848         struct detailed_timing *timing;
4849         struct detailed_non_pixel *data;
4850         struct detailed_data_monitor_range *range;
4851         struct amdgpu_dm_connector *amdgpu_dm_connector =
4852                         to_amdgpu_dm_connector(connector);
4853
4854         struct drm_device *dev = connector->dev;
4855         struct amdgpu_device *adev = dev->dev_private;
4856
4857         edid_check_required = false;
4858         if (!amdgpu_dm_connector->dc_sink) {
4859                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
4860                 return;
4861         }
4862         if (!adev->dm.freesync_module)
4863                 return;
4864         /*
4865          * if edid non zero restrict freesync only for dp and edp
4866          */
4867         if (edid) {
4868                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
4869                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
4870                         edid_check_required = is_dp_capable_without_timing_msa(
4871                                                 adev->dm.dc,
4872                                                 amdgpu_dm_connector);
4873                 }
4874         }
4875         val_capable = 0;
4876         if (edid_check_required == true && (edid->version > 1 ||
4877            (edid->version == 1 && edid->revision > 1))) {
4878                 for (i = 0; i < 4; i++) {
4879
4880                         timing  = &edid->detailed_timings[i];
4881                         data    = &timing->data.other_data;
4882                         range   = &data->data.range;
4883                         /*
4884                          * Check if monitor has continuous frequency mode
4885                          */
4886                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
4887                                 continue;
4888                         /*
4889                          * Check for flag range limits only. If flag == 1 then
4890                          * no additional timing information provided.
4891                          * Default GTF, GTF Secondary curve and CVT are not
4892                          * supported
4893                          */
4894                         if (range->flags != 1)
4895                                 continue;
4896
4897                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
4898                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
4899                         amdgpu_dm_connector->pixel_clock_mhz =
4900                                 range->pixel_clock_mhz * 10;
4901                         break;
4902                 }
4903
4904                 if (amdgpu_dm_connector->max_vfreq -
4905                                 amdgpu_dm_connector->min_vfreq > 10) {
4906                         amdgpu_dm_connector->caps.supported = true;
4907                         amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
4908                                         amdgpu_dm_connector->min_vfreq * 1000000;
4909                         amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
4910                                         amdgpu_dm_connector->max_vfreq * 1000000;
4911                                 val_capable = 1;
4912                 }
4913         }
4914
4915         /*
4916          * TODO figure out how to notify user-mode or DRM of freesync caps
4917          * once we figure out how to deal with freesync in an upstreamable
4918          * fashion
4919          */
4920
4921 }
4922
/*
 * amdgpu_dm_remove_sink_from_freesync_module - counterpart of
 * amdgpu_dm_add_sink_to_freesync_module; intentionally a no-op for now.
 */
void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
{
	/*
	 * TODO fill in once we figure out how to deal with freesync in
	 * an upstreamable fashion
	 */
}