Merge tag 'drm-misc-next-2023-01-19' of git://anongit.freedesktop.org/drm/drm-misc...
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2527833bb4641b1fd1689ec3f2aecb99c46e83ce..2db449fed3003c352e692c3bba344cfa320e952c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -147,14 +147,6 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 /* Number of bytes in PSP footer for firmware. */
 #define PSP_FOOTER_BYTES 0x100
 
-/*
- * DMUB Async to Sync Mechanism Status
- */
-#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
-#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
-#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
-#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4
-
 /**
  * DOC: overview
  *
@@ -219,7 +211,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
 
 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
-                                   uint32_t link_index,
+                                   u32 link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
 static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
@@ -271,7 +263,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
 {
-       uint32_t v_blank_start, v_blank_end, h_position, v_position;
+       u32 v_blank_start, v_blank_end, h_position, v_position;
 
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
@@ -370,7 +362,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
-       uint32_t vpos, hpos, v_blank_start, v_blank_end;
+       u32 vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;
 
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -657,7 +649,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
-       uint8_t link_index = 0;
+       u8 link_index = 0;
        struct drm_device *dev;
 
        if (adev == NULL)
@@ -758,7 +750,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
-       uint32_t count = 0;
+       u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;
 
@@ -1024,7 +1016,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
-       uint32_t i, fw_inst_const_size, fw_bss_data_size;
+       u32 i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
 
        if (!dmub_srv)
@@ -1105,7 +1097,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        /* Initialize hardware. */
        memset(&hw_params, 0, sizeof(hw_params));
        hw_params.fb_base = adev->gmc.fb_start;
-       hw_params.fb_offset = adev->gmc.aper_base;
+       hw_params.fb_offset = adev->vm_manager.vram_base_offset;
 
        /* backdoor load firmware and trigger dmub running */
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
@@ -1185,10 +1177,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
 
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
-       uint64_t pt_base;
-       uint32_t logical_addr_low;
-       uint32_t logical_addr_high;
-       uint32_t agp_base, agp_bot, agp_top;
+       u64 pt_base;
+       u32 logical_addr_low;
+       u32 logical_addr_high;
+       u32 agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 
        memset(pa_config, 0, sizeof(*pa_config));
@@ -1227,7 +1219,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
 
        pa_config->system_aperture.fb_base = adev->gmc.fb_start;
-       pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
+       pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
        pa_config->system_aperture.fb_top = adev->gmc.fb_end;
 
        pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
@@ -1372,7 +1364,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+               },
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+               },
+       },
        {}
+       /* TODO: refactor this from a fixed table to a dynamic option */
 };
 
 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
@@ -1405,6 +1434,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        memset(&init_params, 0, sizeof(init_params));
 #endif
 
+       mutex_init(&adev->dm.dpia_aux_lock);
        mutex_init(&adev->dm.dc_lock);
        mutex_init(&adev->dm.audio_lock);
 
@@ -1474,6 +1504,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 1):
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
+               case IP_VERSION(3, 1, 4):
                case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
                        init_data.flags.gpu_vm_support = true;
@@ -1612,7 +1643,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        }
 #endif
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+       adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+       if (!adev->dm.secure_display_ctxs) {
+               DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+       }
 #endif
        if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1707,10 +1741,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       if (adev->dm.crc_rd_wrk) {
-               flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
-               kfree(adev->dm.crc_rd_wrk);
-               adev->dm.crc_rd_wrk = NULL;
+       if (adev->dm.secure_display_ctxs) {
+               for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+                       if (adev->dm.secure_display_ctxs[i].crtc) {
+                               flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+                               flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+                       }
+               }
+               kfree(adev->dm.secure_display_ctxs);
+               adev->dm.secure_display_ctxs = NULL;
        }
 #endif
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -1769,6 +1808,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 
        mutex_destroy(&adev->dm.audio_lock);
        mutex_destroy(&adev->dm.dc_lock);
+       mutex_destroy(&adev->dm.dpia_aux_lock);
 
        return;
 }
@@ -1844,25 +1884,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
                return 0;
        }
 
-       r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
-       if (r == -ENOENT) {
+       r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+       if (r == -ENODEV) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
-       if (r) {
-               dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
-                       fw_name_dmcu);
-               return r;
-       }
-
-       r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
-               release_firmware(adev->dm.fw_dmcu);
-               adev->dm.fw_dmcu = NULL;
+               amdgpu_ucode_release(&adev->dm.fw_dmcu);
                return r;
        }
 
@@ -1908,7 +1940,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
-       const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;
@@ -1916,73 +1947,46 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(2, 1, 0):
                dmub_asic = DMUB_ASIC_DCN21;
-               fw_name_dmub = FIRMWARE_RENOIR_DMUB;
-               if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
-                       fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case IP_VERSION(3, 0, 0):
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
+               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
                        dmub_asic = DMUB_ASIC_DCN30;
-                       fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
-               } else {
+               else
                        dmub_asic = DMUB_ASIC_DCN30;
-                       fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
-               }
                break;
        case IP_VERSION(3, 0, 1):
                dmub_asic = DMUB_ASIC_DCN301;
-               fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
                break;
        case IP_VERSION(3, 0, 2):
                dmub_asic = DMUB_ASIC_DCN302;
-               fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;
        case IP_VERSION(3, 0, 3):
                dmub_asic = DMUB_ASIC_DCN303;
-               fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
                break;
        case IP_VERSION(3, 1, 2):
        case IP_VERSION(3, 1, 3):
                dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
-               fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
                break;
        case IP_VERSION(3, 1, 4):
                dmub_asic = DMUB_ASIC_DCN314;
-               fw_name_dmub = FIRMWARE_DCN_314_DMUB;
                break;
        case IP_VERSION(3, 1, 5):
                dmub_asic = DMUB_ASIC_DCN315;
-               fw_name_dmub = FIRMWARE_DCN_315_DMUB;
                break;
        case IP_VERSION(3, 1, 6):
                dmub_asic = DMUB_ASIC_DCN316;
-               fw_name_dmub = FIRMWARE_DCN316_DMUB;
                break;
        case IP_VERSION(3, 2, 0):
                dmub_asic = DMUB_ASIC_DCN32;
-               fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
                break;
        case IP_VERSION(3, 2, 1):
                dmub_asic = DMUB_ASIC_DCN321;
-               fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
                break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }
 
-       r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
-       if (r) {
-               DRM_ERROR("DMUB firmware loading failed: %d\n", r);
-               return 0;
-       }
-
-       r = amdgpu_ucode_validate(adev->dm.dmub_fw);
-       if (r) {
-               DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
-               return 0;
-       }
-
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
@@ -2049,7 +2053,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
-                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+                                   AMDGPU_GEM_DOMAIN_VRAM |
+                                   AMDGPU_GEM_DOMAIN_GTT,
+                                   &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
@@ -2104,11 +2110,8 @@ static int dm_sw_fini(void *handle)
                adev->dm.dmub_srv = NULL;
        }
 
-       release_firmware(adev->dm.dmub_fw);
-       adev->dm.dmub_fw = NULL;
-
-       release_firmware(adev->dm.fw_dmcu);
-       adev->dm.fw_dmcu = NULL;
+       amdgpu_ucode_release(&adev->dm.dmub_fw);
+       amdgpu_ucode_release(&adev->dm.fw_dmcu);
 
        return 0;
 }
@@ -2134,6 +2137,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
+                               ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+                                                                    aconnector->dc_link);
                                break;
                        }
                }
@@ -2455,7 +2460,7 @@ struct amdgpu_dm_connector *
 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
 {
-       uint32_t i;
+       u32 i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;
@@ -2703,12 +2708,14 @@ static int dm_resume(void *handle)
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
 
+               if (!aconnector->dc_link)
+                       continue;
+
                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->dc_link &&
-                   aconnector->dc_link->type == dc_connection_mst_branch)
+               if (aconnector->dc_link->type == dc_connection_mst_branch)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
@@ -3086,8 +3093,8 @@ static void handle_hpd_irq(void *param)
 
 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 {
-       uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-       uint8_t dret;
+       u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+       u8 dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;
@@ -3115,7 +3122,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
-               uint8_t retry;
+               u8 retry;
                dret = 0;
 
                process_count++;
@@ -3134,7 +3141,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
                                dpcd_bytes_to_read - 1;
 
                        for (retry = 0; retry < 3; retry++) {
-                               uint8_t wret;
+                               u8 wret;
 
                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
@@ -4148,12 +4155,12 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 {
        struct amdgpu_display_manager *dm = &adev->dm;
-       int32_t i;
+       s32 i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
-       uint32_t link_cnt;
-       int32_t primary_planes;
+       u32 link_cnt;
+       s32 primary_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
        bool psr_feature_enabled = false;
@@ -4330,6 +4337,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                amdgpu_set_panel_orientation(&aconnector->base);
        }
 
+       /* If we didn't find a panel, notify the acpi video detection */
+       if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
+               acpi_video_report_nolcd();
+
        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
@@ -4469,6 +4480,61 @@ DEVICE_ATTR_WO(s3_debug);
 
 #endif
 
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+       char *fw_name_dmub;
+       int r;
+
+       switch (adev->ip_versions[DCE_HWIP][0]) {
+       case IP_VERSION(2, 1, 0):
+               fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+               if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+                       fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+               break;
+       case IP_VERSION(3, 0, 0):
+               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+                       fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+               else
+                       fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+               break;
+       case IP_VERSION(3, 0, 1):
+               fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+               break;
+       case IP_VERSION(3, 0, 2):
+               fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+               break;
+       case IP_VERSION(3, 0, 3):
+               fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+               break;
+       case IP_VERSION(3, 1, 2):
+       case IP_VERSION(3, 1, 3):
+               fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+               break;
+       case IP_VERSION(3, 1, 4):
+               fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+               break;
+       case IP_VERSION(3, 1, 5):
+               fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+               break;
+       case IP_VERSION(3, 1, 6):
+               fw_name_dmub = FIRMWARE_DCN316_DMUB;
+               break;
+       case IP_VERSION(3, 2, 0):
+               fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+               break;
+       case IP_VERSION(3, 2, 1):
+               fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+               break;
+       default:
+               /* ASIC doesn't support DMUB. */
+               return 0;
+       }
+       r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+       if (r)
+               DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+       return r;
+}
+
 static int dm_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4601,7 +4667,7 @@ static int dm_early_init(void *handle)
 #endif
        adev->dc_enabled = true;
 
-       return 0;
+       return dm_init_microcode(adev);
 }
 
 static bool modereset_required(struct drm_crtc_state *crtc_state)
@@ -4666,7 +4732,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
 static int
 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
-                           const uint64_t tiling_flags,
+                           const u64 tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address,
                            bool tmz_surface,
@@ -4839,6 +4905,35 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        return 0;
 }
 
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+                                     struct rect *dirty_rect, int32_t x,
+                                     struct rect *dirty_rect, s32 x,
+                                     int *i, bool ffu)
+{
+       if (*i > DC_MAX_DIRTY_RECTS)
+               return;
+
+       if (*i == DC_MAX_DIRTY_RECTS)
+               goto out;
+
+       dirty_rect->x = x;
+       dirty_rect->y = y;
+       dirty_rect->width = width;
+       dirty_rect->height = height;
+
+       if (ffu)
+               drm_dbg(plane->dev,
+                       "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+                       plane->base.id, width, height);
+       else
+               drm_dbg(plane->dev,
+                       "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+                       plane->base.id, x, y, width, height);
+
+out:
+       (*i)++;
+}
+
 /**
  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
  *
@@ -4859,10 +4954,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
  * implicitly provide damage clips without any client support via the plane
  * bounds.
- *
- * Today, amdgpu_dm only supports the MPO and cursor usecase.
- *
- * TODO: Also enable for FB_DAMAGE_CLIPS
  */
 static void fill_dc_dirty_rects(struct drm_plane *plane,
                                struct drm_plane_state *old_plane_state,
@@ -4872,12 +4963,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 {
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        struct rect *dirty_rects = flip_addrs->dirty_rects;
-       uint32_t num_clips;
+       u32 num_clips;
+       struct drm_mode_rect *clips;
        bool bb_changed;
        bool fb_changed;
-       uint32_t i = 0;
-
-       flip_addrs->dirty_rect_count = 0;
+       u32 i = 0;
 
        /*
         * Cursor plane has it's own dirty rect update interface. See
@@ -4886,20 +4976,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
        if (plane->type == DRM_PLANE_TYPE_CURSOR)
                return;
 
-       /*
-        * Today, we only consider MPO use-case for PSR SU. If MPO not
-        * requested, and there is a plane update, do FFU.
-        */
+       num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+       clips = drm_plane_get_damage_clips(new_plane_state);
+
        if (!dm_crtc_state->mpo_requested) {
-               dirty_rects[0].x = 0;
-               dirty_rects[0].y = 0;
-               dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
-               dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
-               flip_addrs->dirty_rect_count = 1;
-               DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
-                                new_plane_state->plane->base.id,
-                                dm_crtc_state->base.mode.crtc_hdisplay,
-                                dm_crtc_state->base.mode.crtc_vdisplay);
+               if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+                       goto ffu;
+
+               for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+                       fill_dc_dirty_rect(new_plane_state->plane,
+                                          &dirty_rects[i], clips->x1,
+                                          clips->y1, clips->x2 - clips->x1,
+                                          clips->y2 - clips->y1,
+                                          &flip_addrs->dirty_rect_count,
+                                          false);
                return;
        }
 
@@ -4910,7 +5000,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
         * If plane is moved or resized, also add old bounding box to dirty
         * rects.
         */
-       num_clips = drm_plane_get_damage_clips_count(new_plane_state);
        fb_changed = old_plane_state->fb->base.id !=
                     new_plane_state->fb->base.id;
        bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
@@ -4918,36 +5007,51 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
                      old_plane_state->crtc_w != new_plane_state->crtc_w ||
                      old_plane_state->crtc_h != new_plane_state->crtc_h);
 
-       DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
-                        new_plane_state->plane->base.id,
-                        bb_changed, fb_changed, num_clips);
-
-       if (num_clips || fb_changed || bb_changed) {
-               dirty_rects[i].x = new_plane_state->crtc_x;
-               dirty_rects[i].y = new_plane_state->crtc_y;
-               dirty_rects[i].width = new_plane_state->crtc_w;
-               dirty_rects[i].height = new_plane_state->crtc_h;
-               DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
-                                new_plane_state->plane->base.id,
-                                dirty_rects[i].x, dirty_rects[i].y,
-                                dirty_rects[i].width, dirty_rects[i].height);
-               i += 1;
-       }
+       drm_dbg(plane->dev,
+               "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+               new_plane_state->plane->base.id,
+               bb_changed, fb_changed, num_clips);
 
-       /* Add old plane bounding-box if plane is moved or resized */
        if (bb_changed) {
-               dirty_rects[i].x = old_plane_state->crtc_x;
-               dirty_rects[i].y = old_plane_state->crtc_y;
-               dirty_rects[i].width = old_plane_state->crtc_w;
-               dirty_rects[i].height = old_plane_state->crtc_h;
-               DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
-                               old_plane_state->plane->base.id,
-                               dirty_rects[i].x, dirty_rects[i].y,
-                               dirty_rects[i].width, dirty_rects[i].height);
-               i += 1;
-       }
+               fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+                                  new_plane_state->crtc_x,
+                                  new_plane_state->crtc_y,
+                                  new_plane_state->crtc_w,
+                                  new_plane_state->crtc_h, &i, false);
+
+               /* Add old plane bounding-box if plane is moved or resized */
+               fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+                                  old_plane_state->crtc_x,
+                                  old_plane_state->crtc_y,
+                                  old_plane_state->crtc_w,
+                                  old_plane_state->crtc_h, &i, false);
+       }
+
+       if (num_clips) {
+               for (; i < num_clips; clips++)
+                       fill_dc_dirty_rect(new_plane_state->plane,
+                                          &dirty_rects[i], clips->x1,
+                                          clips->y1, clips->x2 - clips->x1,
+                                          clips->y2 - clips->y1, &i, false);
+       } else if (fb_changed && !bb_changed) {
+               fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+                                  new_plane_state->crtc_x,
+                                  new_plane_state->crtc_y,
+                                  new_plane_state->crtc_w,
+                                  new_plane_state->crtc_h, &i, false);
+       }
+
+       if (i > DC_MAX_DIRTY_RECTS)
+               goto ffu;
 
        flip_addrs->dirty_rect_count = i;
+       return;
+
+ffu:
+       fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
+                          dm_crtc_state->base.mode.crtc_hdisplay,
+                          dm_crtc_state->base.mode.crtc_vdisplay,
+                          &flip_addrs->dirty_rect_count, true);
 }
 
 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
@@ -5009,7 +5113,7 @@ static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
                                      bool is_y420, int requested_bpc)
 {
-       uint8_t bpc;
+       u8 bpc;
 
        if (is_y420) {
                bpc = 8;
@@ -5553,8 +5657,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                                    uint32_t max_dsc_target_bpp_limit_override)
 {
        const struct dc_link_settings *verified_link_cap = NULL;
-       uint32_t link_bw_in_kbps;
-       uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+       u32 link_bw_in_kbps;
+       u32 edp_min_bpp_x16, edp_max_bpp_x16;
        struct dc *dc = sink->ctx->dc;
        struct dc_dsc_bw_range bw_range = {0};
        struct dc_dsc_config dsc_cfg = {0};
@@ -5611,11 +5715,11 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                        struct dsc_dec_dpcd_caps *dsc_caps)
 {
        struct drm_connector *drm_connector = &aconnector->base;
-       uint32_t link_bandwidth_kbps;
+       u32 link_bandwidth_kbps;
        struct dc *dc = sink->ctx->dc;
-       uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
-       uint32_t dsc_max_supported_bw_in_kbps;
-       uint32_t max_dsc_target_bpp_limit_override =
+       u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+       u32 dsc_max_supported_bw_in_kbps;
+       u32 max_dsc_target_bpp_limit_override =
                drm_connector->display_info.max_dsc_bpp;
 
        link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -5762,7 +5866,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+               recalculate_timing = amdgpu_freesync_vid_mode &&
+                                is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        drm_mode_copy(&saved_mode, &mode);
@@ -6836,7 +6941,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
        const struct drm_display_mode *m;
        struct drm_display_mode *new_mode;
        uint i;
-       uint32_t new_modes_count = 0;
+       u32 new_modes_count = 0;
 
        /* Standard FPS values
         *
@@ -6850,7 +6955,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
         * 60           - Commonly used
         * 48,72,96,120 - Multiples of 24
         */
-       static const uint32_t common_rates[] = {
+       static const u32 common_rates[] = {
                23976, 24000, 25000, 29970, 30000,
                48000, 50000, 60000, 72000, 96000, 120000
        };
@@ -6866,8 +6971,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
                return 0;
 
        for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
-               uint64_t target_vtotal, target_vtotal_diff;
-               uint64_t num, den;
+               u64 target_vtotal, target_vtotal_diff;
+               u64 num, den;
 
                if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
                        continue;
@@ -6913,7 +7018,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
 
-       if (!edid)
+       if (!(amdgpu_freesync_vid_mode && edid))
                return;
 
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -7109,7 +7214,7 @@ create_i2c(struct ddc_service *ddc_service,
  */
 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *aconnector,
-                                   uint32_t link_index,
+                                   u32 link_index,
                                    struct amdgpu_encoder *aencoder)
 {
        int res = 0;
@@ -7294,27 +7399,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
 }
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
-                                           const struct drm_connector_state *old_state,
-                                           const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+                                           struct drm_crtc_state *old_crtc_state,
+                                           struct drm_connector_state *new_conn_state,
+                                           struct drm_connector_state *old_conn_state,
+                                           const struct drm_connector *connector,
+                                           struct hdcp_workqueue *hdcp_w)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 
-       /* Handle: Type0/1 change */
-       if (old_state->hdcp_content_type != state->hdcp_content_type &&
-           state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+       pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+               connector->index, connector->status, connector->dpms);
+       pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+               old_conn_state->content_protection, new_conn_state->content_protection);
+
+       if (old_crtc_state)
+               pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+               old_crtc_state->enable,
+               old_crtc_state->active,
+               old_crtc_state->mode_changed,
+               old_crtc_state->active_changed,
+               old_crtc_state->connectors_changed);
+
+       if (new_crtc_state)
+               pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+               new_crtc_state->enable,
+               new_crtc_state->active,
+               new_crtc_state->mode_changed,
+               new_crtc_state->active_changed,
+               new_crtc_state->connectors_changed);
+
+       /* hdcp content type change */
+       if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+           new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
                return true;
        }
 
-       /* CP is being re enabled, ignore this
-        *
-        * Handles:     ENABLED -> DESIRED
-        */
-       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
-           state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+       /* CP is being re enabled, ignore this */
+       if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               if (new_crtc_state && new_crtc_state->mode_changed) {
+                       new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+                       pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+                       return true;
+               }
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+               pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
                return false;
        }
 
@@ -7322,9 +7455,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
         *
         * Handles:     UNDESIRED -> ENABLED
         */
-       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
-           state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+       if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
        /* Stream removed and re-enabled
         *
@@ -7334,10 +7467,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
-       if (!(old_state->crtc && old_state->crtc->enabled) &&
-               state->crtc && state->crtc->enabled &&
+       if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+               new_conn_state->crtc && new_conn_state->crtc->enabled &&
                connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
                dm_con_state->update_hdcp = false;
+               pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+                       __func__);
                return true;
        }
 
@@ -7349,35 +7484,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
-       if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
-           connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+       if (dm_con_state->update_hdcp &&
+       new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+       connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
                dm_con_state->update_hdcp = false;
+               pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+                       __func__);
                return true;
        }
 
-       /*
-        * Handles:     UNDESIRED -> UNDESIRED
-        *              DESIRED -> DESIRED
-        *              ENABLED -> ENABLED
-        */
-       if (old_state->content_protection == state->content_protection)
+       if (old_conn_state->content_protection == new_conn_state->content_protection) {
+               if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+                       if (new_crtc_state && new_crtc_state->mode_changed) {
+                               pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+                                       __func__);
+                               return true;
+                       }
+                       pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+                               __func__);
+                       return false;
+               }
+
+               pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
                return false;
+       }
 
-       /*
-        * Handles:     UNDESIRED -> DESIRED
-        *              DESIRED -> UNDESIRED
-        *              ENABLED -> UNDESIRED
-        */
-       if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+       if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+               pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+                       __func__);
                return true;
+       }
 
-       /*
-        * Handles:     DESIRED -> ENABLED
-        */
+       pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
        return false;
 }
-
 #endif
+
 static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
@@ -7593,8 +7735,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                    struct drm_crtc *pcrtc,
                                    bool wait_for_vblank)
 {
-       uint32_t i;
-       uint64_t timestamp_ns;
+       u32 i;
+       u64 timestamp_ns;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@@ -7605,7 +7747,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
        int planes_count = 0, vpos, hpos;
        unsigned long flags;
-       uint32_t target_vblank, last_flip_vblank;
+       u32 target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool cursor_update = false;
        bool pflip_present = false;
@@ -8043,7 +8185,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
-       uint32_t i, j;
+       u32 i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned long flags;
@@ -8212,15 +8354,66 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                }
        }
 #ifdef CONFIG_DRM_AMD_DC_HDCP
+       for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+               struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+               struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+               struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+               pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+               if (!connector)
+                       continue;
+
+               pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+                       connector->index, connector->status, connector->dpms);
+               pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+                       old_con_state->content_protection, new_con_state->content_protection);
+
+               if (aconnector->dc_sink) {
+                       if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+                               aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+                               pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+                               aconnector->dc_sink->edid_caps.display_name);
+                       }
+               }
+
+               new_crtc_state = NULL;
+               old_crtc_state = NULL;
+
+               if (acrtc) {
+                       new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+                       old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+               }
+
+               if (old_crtc_state)
+                       pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+                       old_crtc_state->enable,
+                       old_crtc_state->active,
+                       old_crtc_state->mode_changed,
+                       old_crtc_state->active_changed,
+                       old_crtc_state->connectors_changed);
+
+               if (new_crtc_state)
+                       pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+                       new_crtc_state->enable,
+                       new_crtc_state->active,
+                       new_crtc_state->mode_changed,
+                       new_crtc_state->active_changed,
+                       new_crtc_state->connectors_changed);
+       }
+
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
                new_crtc_state = NULL;
+               old_crtc_state = NULL;
 
-               if (acrtc)
+               if (acrtc) {
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+                       old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+               }
 
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8232,11 +8425,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        continue;
                }
 
-               if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+               if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+                                                                                       old_con_state, connector, adev->dm.hdcp_workqueue)) {
+                       /* when display is unplugged from mst hub, connector will
+                        * be destroyed within dm_dp_mst_connector_destroy. connector
+                        * hdcp properties, like type, undesired, desired, enabled,
+                        * will be lost. So, save hdcp properties into hdcp_work within
+                        * amdgpu_dm_atomic_commit_tail. if the same display is
+                        * plugged back with same display index, its hdcp properties
+                        * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+                        */
+
+                       bool enable_encryption = false;
+
+                       if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                               enable_encryption = true;
+
+                       if (aconnector->dc_link && aconnector->dc_sink &&
+                               aconnector->dc_link->type == dc_connection_mst_branch) {
+                               struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+                               struct hdcp_workqueue *hdcp_w =
+                                       &hdcp_work[aconnector->dc_link->link_index];
+
+                               hdcp_w->hdcp_content_type[connector->index] =
+                                       new_con_state->hdcp_content_type;
+                               hdcp_w->content_protection[connector->index] =
+                                       new_con_state->content_protection;
+                       }
+
+                       if (new_crtc_state && new_crtc_state->mode_changed &&
+                               new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                               enable_encryption = true;
+
+                       DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
-                               new_con_state->hdcp_content_type,
-                               new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+                               new_con_state->hdcp_content_type, enable_encryption);
+               }
        }
 #endif
 
@@ -8334,9 +8560,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 #ifdef CONFIG_DEBUG_FS
                enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-               struct crc_rd_work *crc_rd_wrk;
-#endif
 #endif
                /* Count number of newly disabled CRTCs for dropping PM refs later. */
                if (old_crtc_state->active && !new_crtc_state->active)
@@ -8349,9 +8572,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                update_stream_irq_parameters(dm, dm_new_crtc_state);
 
 #ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-               crc_rd_wrk = dm->crc_rd_wrk;
-#endif
                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                cur_crc_src = acrtc->dm_irq_params.crc_src;
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8380,10 +8600,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                                if (amdgpu_dm_crc_window_is_activated(crtc)) {
                                        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                        acrtc->dm_irq_params.window_param.update_win = true;
+
+                                       /*
+                                        * It takes 2 frames for HW to stably generate CRC when
+                                        * resuming from suspend, so we set skip_frame_cnt 2.
+                                        */
                                        acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
-                                       spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-                                       crc_rd_wrk->crtc = crtc;
-                                       spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                                }
 #endif
@@ -8606,15 +8828,22 @@ static void get_freesync_config_for_crtc(
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
        int vrefresh = drm_mode_vrefresh(mode);
        bool fs_vid_mode = false;
+       bool drr_active = false;
 
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                                        vrefresh >= aconnector->min_vfreq &&
                                        vrefresh <= aconnector->max_vfreq;
 
-       if (new_crtc_state->vrr_supported) {
+       drr_active = new_crtc_state->vrr_supported &&
+               new_crtc_state->freesync_config.state != VRR_STATE_DISABLED &&
+               new_crtc_state->freesync_config.state != VRR_STATE_INACTIVE &&
+               new_crtc_state->freesync_config.state != VRR_STATE_UNSUPPORTED;
+
+       if (drr_active)
                new_crtc_state->stream->ignore_msa_timing_param = true;
-               fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
 
+       if (new_crtc_state->vrr_supported) {
+               fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
                config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
                config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
                config.vsif_supported = true;
@@ -8674,7 +8903,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 }
 
 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
-       uint64_t num, den, res;
+       u64 num, den, res;
        struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 
        dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@ -8777,7 +9006,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
-               if (dm_new_crtc_state->stream &&
+               if (amdgpu_freesync_vid_mode &&
+                   dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;
 
@@ -8812,7 +9042,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
 
-               if (dm_new_crtc_state->stream &&
+               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
@@ -8824,7 +9054,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                        set_freesync_fixed_config(dm_new_crtc_state);
 
                        goto skip_modeset;
-               } else if (aconnector &&
+               } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;
@@ -9810,7 +10040,7 @@ fail:
 static bool is_dp_capable_without_timing_msa(struct dc *dc,
                                             struct amdgpu_dm_connector *amdgpu_dm_connector)
 {
-       uint8_t dpcd_data;
+       u8 dpcd_data;
        bool capable = false;
 
        if (amdgpu_dm_connector->dc_link &&
@@ -9829,7 +10059,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
                unsigned int offset,
                unsigned int total_length,
-               uint8_t *data,
+               u8 *data,
                unsigned int length,
                struct amdgpu_hdmi_vsdb_info *vsdb)
 {
@@ -9884,7 +10114,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
 }
 
 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
        int i;
@@ -9925,7 +10155,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
 }
 
 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
        int i;
@@ -9941,7 +10171,7 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
 }
 
 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
        struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
@@ -9955,7 +10185,7 @@ static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
                struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
-       uint8_t *edid_ext = NULL;
+       u8 *edid_ext = NULL;
        int i;
        bool valid_vsdb_found = false;
 
@@ -10131,7 +10361,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
 }
 
 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
-                      uint32_t value, const char *func_name)
+                      u32 value, const char *func_name)
 {
 #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
@@ -10146,7 +10376,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
 {
-       uint32_t value;
+       u32 value;
 #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read; address = 0\n");
@@ -10168,91 +10398,92 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
        return value;
 }
 
-static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
-                                               struct dc_context *ctx,
-                                               uint8_t status_type,
-                                               uint32_t *operation_result)
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+               struct dc_context *ctx,
+               unsigned int link_index,
+               struct aux_payload *payload,
+               enum aux_return_code_type *operation_result)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       int return_status = -1;
        struct dmub_notification *p_notify = adev->dm.dmub_notify;
+       int ret = -1;
 
-       if (is_cmd_aux) {
-               if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
-                       return_status = p_notify->aux_reply.length;
-                       *operation_result = p_notify->result;
-               } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
-                       *operation_result = AUX_RET_ERROR_TIMEOUT;
-               } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
-                       *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
-               } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) {
-                       *operation_result = AUX_RET_ERROR_INVALID_REPLY;
-               } else {
-                       *operation_result = AUX_RET_ERROR_UNKNOWN;
+       mutex_lock(&adev->dm.dpia_aux_lock);
+       if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+               *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+               goto out;
+       }
+
+       if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+               DRM_ERROR("DPIA AUX: timed out waiting for DMUB reply\n");
+               *operation_result = AUX_RET_ERROR_TIMEOUT;
+               goto out;
+       }
+
+       if (p_notify->result != AUX_RET_SUCCESS) {
+               /*
+                * Transient states before tunneling is enabled could
+                * lead to this error. We can ignore this for now.
+                */
+               if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+                       DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+                                       payload->address, payload->length,
+                                       p_notify->result);
                }
-       } else {
-               if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
-                       return_status = 0;
-                       *operation_result = p_notify->sc_status;
-               } else {
-                       *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+               *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+               goto out;
+       }
+
+       payload->reply[0] = p_notify->aux_reply.command;
+       if (!payload->write && p_notify->aux_reply.length &&
+                       (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+
+               if (payload->length != p_notify->aux_reply.length) {
+                       DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+                                p_notify->aux_reply.length,
+                                payload->address, payload->length);
+                       *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+                       goto out;
                }
+
+               memcpy(payload->data, p_notify->aux_reply.data,
+                               p_notify->aux_reply.length);
        }
 
-       return return_status;
+       /* success */
+       ret = p_notify->aux_reply.length;
+       *operation_result = p_notify->result;
+out:
+       mutex_unlock(&adev->dm.dpia_aux_lock);
+       return ret;
 }
 
-int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
-       unsigned int link_index, void *cmd_payload, void *operation_result)
+int amdgpu_dm_process_dmub_set_config_sync(
+               struct dc_context *ctx,
+               unsigned int link_index,
+               struct set_config_cmd_payload *payload,
+               enum set_config_status *operation_result)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       int ret = 0;
+       bool is_cmd_complete;
+       int ret;
 
-       if (is_cmd_aux) {
-               dc_process_dmub_aux_transfer_async(ctx->dc,
-                       link_index, (struct aux_payload *)cmd_payload);
-       } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
-                                       (struct set_config_cmd_payload *)cmd_payload,
-                                       adev->dm.dmub_notify)) {
-               return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
-                                       ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
-                                       (uint32_t *)operation_result);
-       }
+       mutex_lock(&adev->dm.dpia_aux_lock);
+       is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
+                       link_index, payload, adev->dm.dmub_notify);
 
-       ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
-       if (ret == 0) {
+       if (is_cmd_complete ||
+           wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+               ret = 0;
+               *operation_result = adev->dm.dmub_notify->sc_status;
+       } else {
                DRM_ERROR("wait_for_completion_timeout timeout!");
-               return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
-                               ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
-                               (uint32_t *)operation_result);
-       }
-
-       if (is_cmd_aux) {
-               if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
-                       struct aux_payload *payload = (struct aux_payload *)cmd_payload;
-
-                       payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
-                       if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
-                           payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
-
-                               if (payload->length != adev->dm.dmub_notify->aux_reply.length) {
-                                       DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n",
-                                                       payload->address, payload->length,
-                                                       adev->dm.dmub_notify->aux_reply.length);
-                                       return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx,
-                                                       DMUB_ASYNC_TO_SYNC_ACCESS_INVALID,
-                                                       (uint32_t *)operation_result);
-                               }
-
-                               memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
-                                      adev->dm.dmub_notify->aux_reply.length);
-                       }
-               }
+               ret = -1;
+               *operation_result = SET_CONFIG_UNKNOWN_ERROR;
        }
 
-       return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
-                       ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
-                       (uint32_t *)operation_result);
+       mutex_unlock(&adev->dm.dpia_aux_lock);
+       return ret;
 }
 
 /*