Merge tag 'amd-drm-next-5.18-2022-02-11-1' of https://gitlab.freedesktop.org/agd5f...
author Dave Airlie <airlied@redhat.com>
Mon, 14 Feb 2022 00:31:51 +0000 (10:31 +1000)
committer Dave Airlie <airlied@redhat.com>
Mon, 14 Feb 2022 00:31:51 +0000 (10:31 +1000)
amd-drm-next-5.18-2022-02-11-1:

amdgpu:
- Cleanup of power management code
- Enable freesync video mode by default
- Cleanup of RAS code
- Improve VRAM access for debugging using SDMA
- Coding style cleanups
- SR-IOV fixes
- More display FP reorg
- TLB flush fixes for Arcturus and Vega20
- Misc display fixes
- Rework special register access methods for SR-IOV
- DP2 fixes
- DP tunneling fixes
- DSC fixes
- More IP discovery cleanups
- Misc RAS fixes
- Enable both SMU i2c buses where applicable
- s2idle improvements
- DPCS header cleanup
- Add new CAP firmware support for SR-IOV

amdkfd:
- Misc cleanups
- SVM fixes
- CRIU support
- Clean up MQD manager

UAPI:
- Add interface to the amdgpu CTX ioctl to request a stable power state for profiling (see the sketch after this list)
  https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/207
- Add amdkfd support for CRIU
  https://github.com/checkpoint-restore/criu/pull/1709
- Remove old unused amdkfd debugger interface
  It was only implemented for Kaveri and was only ever used by an old HSA tool that was never open sourced
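
  For reference, a minimal userspace sketch of the new stable power state request
  follows. It is illustrative only: it assumes the AMDGPU_CTX_OP_SET_STABLE_PSTATE
  op and the AMDGPU_CTX_STABLE_PSTATE_* values added to drm_amdgpu.h in this cycle,
  plus a context id previously allocated via the CTX ioctl; see the Mesa merge
  request above for the canonical libdrm usage.

  /* Hedged sketch: pin an amdgpu context to a stable (peak) power state for
   * profiling; pass AMDGPU_CTX_STABLE_PSTATE_NONE later to release it.
   */
  #include <stdint.h>
  #include <string.h>
  #include <xf86drm.h>
  #include <amdgpu_drm.h>

  static int set_stable_pstate(int fd, uint32_t ctx_id, uint32_t pstate)
  {
          union drm_amdgpu_ctx args;

          memset(&args, 0, sizeof(args));
          args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
          args.in.ctx_id = ctx_id;
          args.in.flags = pstate; /* e.g. AMDGPU_CTX_STABLE_PSTATE_PEAK */

          /* DRM_IOCTL_AMDGPU_CTX carries both the request and the reply. */
          return drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
  }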

radeon:
- Fix error handling in radeon_driver_open_kms
- UVD suspend fix
- Misc fixes

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220211220706.5803-1-alexander.deucher@amd.com
16 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/include/dpcd_defs.h

index d8b854fcbffa7dc8a4925bf90eabab4475f04696,92f757c007d5ea5cdf3fd1cb0aca7cb17f4b466b..2931c8ff4cc63842b73f41b7f88460570d153b0e
@@@ -99,7 -99,6 +99,6 @@@
  #include "amdgpu_gem.h"
  #include "amdgpu_doorbell.h"
  #include "amdgpu_amdkfd.h"
- #include "amdgpu_smu.h"
  #include "amdgpu_discovery.h"
  #include "amdgpu_mes.h"
  #include "amdgpu_umc.h"
  #include "amdgpu_smuio.h"
  #include "amdgpu_fdinfo.h"
  #include "amdgpu_mca.h"
+ #include "amdgpu_ras.h"
  
  #define MAX_GPU_INSTANCE              16
  
@@@ -197,7 -197,6 +197,6 @@@ extern int amdgpu_emu_mode
  extern uint amdgpu_smu_memory_pool_size;
  extern int amdgpu_smu_pptable_id;
  extern uint amdgpu_dc_feature_mask;
- extern uint amdgpu_freesync_vid_mode;
  extern uint amdgpu_dc_debug_mask;
  extern uint amdgpu_dm_abm_level;
  extern int amdgpu_backlight;
@@@ -373,7 -372,8 +372,8 @@@ int amdgpu_device_ip_block_add(struct a
   */
  bool amdgpu_get_bios(struct amdgpu_device *adev);
  bool amdgpu_read_bios(struct amdgpu_device *adev);
+ bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+                                    u8 *bios, u32 length_bytes);
  /*
   * Clocks
   */
@@@ -950,12 -950,6 +950,6 @@@ struct amdgpu_device 
  
        /* powerplay */
        struct amd_powerplay            powerplay;
-       bool                            pp_force_state_enabled;
-       /* smu */
-       struct smu_context              smu;
-       /* dpm */
        struct amdgpu_pm                pm;
        u32                             cg_flags;
        u32                             pg_flags;
        bool                            runpm;
        bool                            in_runpm;
        bool                            has_pr3;
 +      bool                            is_fw_fb;
  
        bool                            pm_sysfs_en;
        bool                            ucode_sysfs_en;
        uint32_t                        ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
  
        bool                            ram_is_direct_mapped;
+       struct list_head                ras_list;
  };
  
  static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@@ -1321,6 -1316,10 +1317,10 @@@ void amdgpu_device_invalidate_hdp(struc
                struct amdgpu_ring *ring);
  
  void amdgpu_device_halt(struct amdgpu_device *adev);
+ u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+                               u32 reg);
+ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+                               u32 reg, u32 v);
  
  /* atpx handler */
  #if defined(CONFIG_VGA_SWITCHEROO)
@@@ -1408,12 -1407,10 +1408,10 @@@ int amdgpu_acpi_smart_shift_update(stru
  int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
  
  void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
- bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
  void amdgpu_acpi_detect(void);
  #else
  static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
  static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
- static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
  static inline void amdgpu_acpi_detect(void) { }
  static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
  static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@@ -1422,6 -1419,14 +1420,14 @@@ static inline int amdgpu_acpi_smart_shi
                                                 enum amdgpu_ss ss_state) { return 0; }
  #endif
  
+ #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+ #else
+ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+ #endif
  int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **mapping);
@@@ -1452,6 -1457,15 +1458,15 @@@ int amdgpu_device_set_cg_state(struct a
  int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
                               enum amd_powergating_state state);
  
+ static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
+ {
+       return amdgpu_gpu_recovery != 0 &&
+               adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
+               adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
+               adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
+               adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
+ }
  #include "amdgpu_object.h"
  
  static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
index e8440d306496779ddab124c5393f252301ab1008,af12256e1bd38e82ae6eb837d4a4a69adfe7e491..10b9e99c8941206b066b1aac7cd47f7e686839d5
@@@ -127,8 -127,6 +127,6 @@@ static int amdgpu_cs_parser_init(struc
                goto free_chunk;
        }
  
-       mutex_lock(&p->ctx->lock);
        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
@@@ -585,6 -583,16 +583,16 @@@ static int amdgpu_cs_parser_bos(struct 
                }
        }
  
+       /* Move fence waiting after getting reservation lock of
+        * PD root. Then there is no need on a ctx mutex lock.
+        */
+       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+               goto error_validate;
+       }
        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
@@@ -700,7 -708,6 +708,6 @@@ static void amdgpu_cs_parser_fini(struc
        dma_fence_put(parser->fence);
  
        if (parser->ctx) {
-               mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
@@@ -944,7 -951,7 +951,7 @@@ static int amdgpu_cs_ib_fill(struct amd
        if (parser->job->uf_addr && ring->funcs->no_user_fence)
                return -EINVAL;
  
-       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
+       return 0;
  }
  
  static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@@ -1274,11 -1281,14 +1281,11 @@@ static int amdgpu_cs_submit(struct amdg
                /*
                 * Work around dma_resv shortcommings by wrapping up the
                 * submission in a dma_fence_chain and add it as exclusive
 -               * fence, but first add the submission as shared fence to make
 -               * sure that shared fences never signal before the exclusive
 -               * one.
 +               * fence.
                 */
                dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
                                     dma_fence_get(p->fence), 1);
  
 -              dma_resv_add_shared_fence(resv, p->fence);
                rcu_assign_pointer(resv->fence_excl, &chain->base);
                e->chain = NULL;
        }
@@@ -1360,7 -1370,6 +1367,6 @@@ int amdgpu_cs_ioctl(struct drm_device *
                goto out;
  
        r = amdgpu_cs_submit(&parser, cs);
  out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
  
@@@ -1506,6 -1515,7 +1512,7 @@@ int amdgpu_cs_fence_to_handle_ioctl(str
                return 0;
  
        default:
+               dma_fence_put(fence);
                return -EINVAL;
        }
  }
index 3f21a13882a878d78dcdce5bea23a98deb68d504,6cad39c31c58a61784be7587ca9365a36c9c0588..ec4c9ef5f795982a5af7b513330523f99d40b708
@@@ -200,10 -200,8 +200,10 @@@ int amdgpu_display_crtc_page_flip_targe
                goto unpin;
        }
  
 -      r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL,
 -                              &work->shared_count, &work->shared);
 +      /* TODO: Unify this with other drivers */
 +      r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
 +                              &work->shared_count,
 +                              &work->shared);
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to get fences for buffer\n");
                goto unpin;
@@@ -512,19 -510,24 +512,24 @@@ uint32_t amdgpu_display_supported_domai
                case CHIP_STONEY:
                        domain |= AMDGPU_GEM_DOMAIN_GTT;
                        break;
-               case CHIP_RAVEN:
-                       /* enable S/G on PCO and RV2 */
-                       if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
-                           (adev->apu_flags & AMD_APU_IS_PICASSO))
-                               domain |= AMDGPU_GEM_DOMAIN_GTT;
-                       break;
-               case CHIP_RENOIR:
-               case CHIP_VANGOGH:
-               case CHIP_YELLOW_CARP:
-                       domain |= AMDGPU_GEM_DOMAIN_GTT;
-                       break;
                default:
+                       switch (adev->ip_versions[DCE_HWIP][0]) {
+                       case IP_VERSION(1, 0, 0):
+                       case IP_VERSION(1, 0, 1):
+                               /* enable S/G on PCO and RV2 */
+                               if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+                                   (adev->apu_flags & AMD_APU_IS_PICASSO))
+                                       domain |= AMDGPU_GEM_DOMAIN_GTT;
+                               break;
+                       case IP_VERSION(2, 1, 0):
+                       case IP_VERSION(3, 0, 1):
+                       case IP_VERSION(3, 1, 2):
+                       case IP_VERSION(3, 1, 3):
+                               domain |= AMDGPU_GEM_DOMAIN_GTT;
+                               break;
+                       default:
+                               break;
+                       }
                        break;
                }
        }
index 4c83f1db8a244427ae65c23bc8af6f9b31a7fdfc,0a803e7e599473baf943ca1bbc4f0a4369403b2e..5cdafdcfec59ca5cf1ba9e52496f60fbde66de7f
@@@ -38,7 -38,6 +38,7 @@@
  #include <linux/mmu_notifier.h>
  #include <linux/suspend.h>
  #include <linux/cc_platform.h>
 +#include <linux/fb.h>
  
  #include "amdgpu.h"
  #include "amdgpu_irq.h"
   * - 3.42.0 - Add 16bpc fixed point display support
   * - 3.43.0 - Add device hot plug/unplug support
   * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
+  * - 3.45.0 - Add context ioctl stable pstate interface
   */
  #define KMS_DRIVER_MAJOR      3
- #define KMS_DRIVER_MINOR      44
+ #define KMS_DRIVER_MINOR      45
  #define KMS_DRIVER_PATCHLEVEL 0
  
  int amdgpu_vram_limit;
@@@ -174,7 -174,6 +175,6 @@@ int amdgpu_mes
  int amdgpu_noretry = -1;
  int amdgpu_force_asic_type = -1;
  int amdgpu_tmz = -1; /* auto */
- uint amdgpu_freesync_vid_mode;
  int amdgpu_reset_method = -1; /* auto */
  int amdgpu_num_kcq = -1;
  int amdgpu_smartshift_bias;
@@@ -843,32 -842,6 +843,6 @@@ module_param_named(backlight, amdgpu_ba
  MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
  module_param_named(tmz, amdgpu_tmz, int, 0444);
  
- /**
-  * DOC: freesync_video (uint)
-  * Enable the optimization to adjust front porch timing to achieve seamless
-  * mode change experience when setting a freesync supported mode for which full
-  * modeset is not needed.
-  *
-  * The Display Core will add a set of modes derived from the base FreeSync
-  * video mode into the corresponding connector's mode list based on commonly
-  * used refresh rates and VRR range of the connected display, when users enable
-  * this feature. From the userspace perspective, they can see a seamless mode
-  * change experience when the change between different refresh rates under the
-  * same resolution. Additionally, userspace applications such as Video playback
-  * can read this modeset list and change the refresh rate based on the video
-  * frame rate. Finally, the userspace can also derive an appropriate mode for a
-  * particular refresh rate based on the FreeSync Mode and add it to the
-  * connector's mode list.
-  *
-  * Note: This is an experimental feature.
-  *
-  * The default value: 0 (off).
-  */
- MODULE_PARM_DESC(
-       freesync_video,
-       "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
- module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
  /**
   * DOC: reset_method (int)
   * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
@@@ -1942,10 -1915,10 +1916,10 @@@ static const struct pci_device_id pciid
        {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
  
        /* Aldebaran */
-       {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+       {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+       {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+       {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
  
        /* CYAN_SKILLFISH */
        {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@@ -1974,26 -1947,28 +1948,48 @@@ MODULE_DEVICE_TABLE(pci, pciidlist)
  
  static const struct drm_driver amdgpu_kms_driver;
  
 +static bool amdgpu_is_fw_framebuffer(resource_size_t base,
 +                                   resource_size_t size)
 +{
 +      bool found = false;
 +#if IS_REACHABLE(CONFIG_FB)
 +      struct apertures_struct *a;
 +
 +      a = alloc_apertures(1);
 +      if (!a)
 +              return false;
 +
 +      a->ranges[0].base = base;
 +      a->ranges[0].size = size;
 +
 +      found = is_firmware_framebuffer(a);
 +      kfree(a);
 +#endif
 +      return found;
 +}
 +
+ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
+ {
+       struct pci_dev *p = NULL;
+       int i;
+       /* 0 - GPU
+        * 1 - audio
+        * 2 - USB
+        * 3 - UCSI
+        */
+       for (i = 1; i < 4; i++) {
+               p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                                               adev->pdev->bus->number, i);
+               if (p) {
+                       pm_runtime_get_sync(&p->dev);
+                       pm_runtime_mark_last_busy(&p->dev);
+                       pm_runtime_put_autosuspend(&p->dev);
+                       pci_dev_put(p);
+               }
+       }
+ }
  static int amdgpu_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
  {
        unsigned long flags = ent->driver_data;
        int ret, retry = 0, i;
        bool supports_atomic = false;
 +      bool is_fw_fb;
 +      resource_size_t base, size;
  
        /* skip devices which are owned by radeon */
        for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
        }
  #endif
  
 +      base = pci_resource_start(pdev, 0);
 +      size = pci_resource_len(pdev, 0);
 +      is_fw_fb = amdgpu_is_fw_framebuffer(base, size);
 +
        /* Get rid of things like offb */
        ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
        if (ret)
        adev->dev  = &pdev->dev;
        adev->pdev = pdev;
        ddev = adev_to_drm(adev);
 +      adev->is_fw_fb = is_fw_fb;
  
        if (!supports_atomic)
                ddev->driver_features &= ~DRIVER_ATOMIC;
@@@ -2126,6 -2094,48 +2122,48 @@@ retry_init
        if (ret)
                DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
  
+       if (adev->runpm) {
+               /* only need to skip on ATPX */
+               if (amdgpu_device_supports_px(ddev))
+                       dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+               /* we want direct complete for BOCO */
+               if (amdgpu_device_supports_boco(ddev))
+                       dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
+                                               DPM_FLAG_SMART_SUSPEND |
+                                               DPM_FLAG_MAY_SKIP_RESUME);
+               pm_runtime_use_autosuspend(ddev->dev);
+               pm_runtime_set_autosuspend_delay(ddev->dev, 5000);
+               pm_runtime_allow(ddev->dev);
+               pm_runtime_mark_last_busy(ddev->dev);
+               pm_runtime_put_autosuspend(ddev->dev);
+               /*
+                * For runpm implemented via BACO, PMFW will handle the
+                * timing for BACO in and out:
+                *   - put ASIC into BACO state only when both video and
+                *     audio functions are in D3 state.
+                *   - pull ASIC out of BACO state when either video or
+                *     audio function is in D0 state.
+                * Also, at startup, PMFW assumes both functions are in
+                * D0 state.
+                *
+                * So if snd driver was loaded prior to amdgpu driver
+                * and audio function was put into D3 state, there will
+                * be no PMFW-aware D-state transition(D0->D3) on runpm
+                * suspend. Thus the BACO will be not correctly kicked in.
+                *
+                * Via amdgpu_get_secondary_funcs(), the audio dev is put
+                * into D0 state. Then there will be a PMFW-aware D-state
+                * transition(D0->D3) on runpm suspend.
+                */
+               if (amdgpu_device_supports_baco(ddev) &&
+                   !(adev->flags & AMD_IS_APU) &&
+                   (adev->asic_type >= CHIP_NAVI10))
+                       amdgpu_get_secondary_funcs(adev);
+       }
        return 0;
  
  err_pci:
@@@ -2137,8 -2147,15 +2175,15 @@@ static voi
  amdgpu_pci_remove(struct pci_dev *pdev)
  {
        struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
  
        drm_dev_unplug(dev);
+       if (adev->runpm) {
+               pm_runtime_get_sync(dev->dev);
+               pm_runtime_forbid(dev->dev);
+       }
        amdgpu_driver_unload_kms(dev);
  
        /*
@@@ -2246,13 -2263,20 +2291,20 @@@ static void amdgpu_drv_delayed_reset_wo
  static int amdgpu_pmops_prepare(struct device *dev)
  {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
  
        /* Return a positive number here so
         * DPM_FLAG_SMART_SUSPEND works properly
         */
        if (amdgpu_device_supports_boco(drm_dev))
-               return pm_runtime_suspended(dev) &&
-                       pm_suspend_via_firmware();
+               return pm_runtime_suspended(dev);
+       /* if we will not support s3 or s2i for the device
+        *  then skip suspend
+        */
+       if (!amdgpu_acpi_is_s0ix_active(adev) &&
+           !amdgpu_acpi_is_s3_active(adev))
+               return 1;
  
        return 0;
  }
index 9b12cab5e60676ddf3a3d73c5f5f8c3b2259bdec,bab6500728cb04cd732f283b081a72e723cfbe80..57b74d35052fbf067c0ddffae523f32d30bb3ee3
@@@ -222,10 -222,16 +222,10 @@@ static void amdgpu_gem_object_close(str
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;
  
-       amdgpu_vm_bo_rmv(adev, bo_va);
+       amdgpu_vm_bo_del(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;
  
 -      fence = dma_resv_excl_fence(bo->tbo.base.resv);
 -      if (fence) {
 -              amdgpu_bo_fence(bo, fence, true);
 -              fence = NULL;
 -      }
 -
        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r || !fence)
                goto out_unlock;
index e0c7fbe01d93939d3df1b81dcde1cd7e4c00c420,c5263908caec05044246584077684b371b838f7d..899a47011a6796d45c8dd6b3724758430b05cce7
@@@ -167,7 -167,6 +167,7 @@@ static int amdgpu_gtt_mgr_new(struct tt
        return 0;
  
  err_free:
 +      ttm_resource_fini(man, &node->base.base);
        kfree(node);
  
  err_out:
@@@ -199,7 -198,6 +199,7 @@@ static void amdgpu_gtt_mgr_del(struct t
        if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
                atomic64_sub(res->num_pages, &mgr->used);
  
 +      ttm_resource_fini(man, res);
        kfree(node);
  }
  
@@@ -222,26 -220,21 +222,21 @@@ uint64_t amdgpu_gtt_mgr_usage(struct am
   *
   * Re-init the gart for each known BO in the GTT.
   */
- int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+ void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
  {
        struct amdgpu_gtt_node *node;
        struct drm_mm_node *mm_node;
        struct amdgpu_device *adev;
-       int r = 0;
  
        adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
                node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-               r = amdgpu_ttm_recover_gart(node->tbo);
-               if (r)
-                       break;
+               amdgpu_ttm_recover_gart(node->tbo);
        }
        spin_unlock(&mgr->lock);
  
        amdgpu_gart_invalidate_tlb(adev);
-       return r;
  }
  
  /**
@@@ -288,8 -281,7 +283,8 @@@ int amdgpu_gtt_mgr_init(struct amdgpu_d
        man->use_tt = true;
        man->func = &amdgpu_gtt_mgr_func;
  
 -      ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);
 +      ttm_resource_manager_init(man, &adev->mman.bdev,
 +                                gtt_size >> PAGE_SHIFT);
  
        start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
        size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
index 1ebb91db22743badd64deaae581f2a9227f295bf,06fefe0b589c17e111a9deaf07a95b8e94612126..9f985bd463be2ee83ee7e562af7a43949a68d342
@@@ -87,11 -87,6 +87,6 @@@ void amdgpu_driver_unload_kms(struct dr
        if (adev->rmmio == NULL)
                return;
  
-       if (adev->runpm) {
-               pm_runtime_get_sync(dev->dev);
-               pm_runtime_forbid(dev->dev);
-       }
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
                DRM_WARN("smart shift update failed\n");
  
@@@ -124,22 -119,6 +119,6 @@@ void amdgpu_register_gpu_instance(struc
        mutex_unlock(&mgpu_info.mutex);
  }
  
- static void amdgpu_get_audio_func(struct amdgpu_device *adev)
- {
-       struct pci_dev *p = NULL;
-       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
-                       adev->pdev->bus->number, 1);
-       if (p) {
-               pm_runtime_get_sync(&p->dev);
-               pm_runtime_mark_last_busy(&p->dev);
-               pm_runtime_put_autosuspend(&p->dev);
-               pci_dev_put(p);
-       }
- }
  /**
   * amdgpu_driver_load_kms - Main load function for KMS.
   *
  int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
  {
        struct drm_device *dev;
-       struct pci_dev *parent;
        int r, acpi_status;
  
        dev = adev_to_drm(adev);
  
-       if (amdgpu_has_atpx() &&
-           (amdgpu_is_atpx_hybrid() ||
-            amdgpu_has_atpx_dgpu_power_cntl()) &&
-           ((flags & AMD_IS_APU) == 0) &&
-           !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
-               flags |= AMD_IS_PX;
-       parent = pci_upstream_bridge(adev->pdev);
-       adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
        /* amdgpu_device_init should report only fatal error
         * like memory allocation failure or iomapping failure,
         * or memory manager initialization failure, it must
                        adev->runpm = true;
                        break;
                }
 +              /* XXX: disable runtime pm if we are the primary adapter
 +               * to avoid displays being re-enabled after DPMS.
 +               * This needs to be sorted out and fixed properly.
 +               */
 +              if (adev->is_fw_fb)
 +                      adev->runpm = false;
                if (adev->runpm)
                        dev_info(adev->dev, "Using BACO for runtime pm\n");
        }
        if (acpi_status)
                dev_dbg(dev->dev, "Error during ACPI methods call\n");
  
-       if (adev->runpm) {
-               /* only need to skip on ATPX */
-               if (amdgpu_device_supports_px(dev))
-                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
-               /* we want direct complete for BOCO */
-               if (amdgpu_device_supports_boco(dev))
-                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
-                                               DPM_FLAG_SMART_SUSPEND |
-                                               DPM_FLAG_MAY_SKIP_RESUME);
-               pm_runtime_use_autosuspend(dev->dev);
-               pm_runtime_set_autosuspend_delay(dev->dev, 5000);
-               pm_runtime_allow(dev->dev);
-               pm_runtime_mark_last_busy(dev->dev);
-               pm_runtime_put_autosuspend(dev->dev);
-               /*
-                * For runpm implemented via BACO, PMFW will handle the
-                * timing for BACO in and out:
-                *   - put ASIC into BACO state only when both video and
-                *     audio functions are in D3 state.
-                *   - pull ASIC out of BACO state when either video or
-                *     audio function is in D0 state.
-                * Also, at startup, PMFW assumes both functions are in
-                * D0 state.
-                *
-                * So if snd driver was loaded prior to amdgpu driver
-                * and audio function was put into D3 state, there will
-                * be no PMFW-aware D-state transition(D0->D3) on runpm
-                * suspend. Thus the BACO will be not correctly kicked in.
-                *
-                * Via amdgpu_get_audio_func(), the audio dev is put
-                * into D0 state. Then there will be a PMFW-aware D-state
-                * transition(D0->D3) on runpm suspend.
-                */
-               if (amdgpu_device_supports_baco(dev) &&
-                   !(adev->flags & AMD_IS_APU) &&
-                   (adev->asic_type >= CHIP_NAVI10))
-                       amdgpu_get_audio_func(adev);
-       }
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
                DRM_WARN("smart shift update failed\n");
  
  out:
-       if (r) {
-               /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
-               if (adev->rmmio && adev->runpm)
-                       pm_runtime_put_noidle(dev->dev);
+       if (r)
                amdgpu_driver_unload_kms(dev);
-       }
  
        return r;
  }
@@@ -406,6 -322,10 +328,10 @@@ static int amdgpu_firmware_info(struct 
                fw_info->ver = adev->psp.toc.fw_version;
                fw_info->feature = adev->psp.toc.feature_version;
                break;
+       case AMDGPU_INFO_FW_CAP:
+               fw_info->ver = adev->psp.cap_fw_version;
+               fw_info->feature = adev->psp.cap_feature_version;
+               break;
        default:
                return -EINVAL;
        }
@@@ -1268,18 -1188,20 +1194,20 @@@ void amdgpu_driver_postclose_kms(struc
        if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
                amdgpu_vce_free_handles(adev, file_priv);
  
-       amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
-               amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+               amdgpu_vm_bo_del(adev, fpriv->csa_va);
                fpriv->csa_va = NULL;
                amdgpu_bo_unreserve(adev->virt.csa_obj);
        }
  
        pasid = fpriv->vm.pasid;
        pd = amdgpu_bo_ref(fpriv->vm.root.bo);
+       if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
+               amdgpu_vm_bo_del(adev, fpriv->prt_va);
+               amdgpu_bo_unreserve(pd);
+       }
  
        amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
        amdgpu_vm_fini(adev, &fpriv->vm);
@@@ -1427,8 -1349,7 +1355,7 @@@ static int amdgpu_debugfs_firmware_info
        struct drm_amdgpu_info_firmware fw_info;
        struct drm_amdgpu_query_fw query_fw;
        struct atom_context *ctx = adev->mode_info.atom_context;
-       uint8_t smu_minor, smu_debug;
-       uint16_t smu_major;
+       uint8_t smu_program, smu_major, smu_minor, smu_debug;
        int ret, i;
  
        static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
        if (ret)
                return ret;
-       smu_major = (fw_info.ver >> 16) & 0xffff;
+       smu_program = (fw_info.ver >> 24) & 0xff;
+       smu_major = (fw_info.ver >> 16) & 0xff;
        smu_minor = (fw_info.ver >> 8) & 0xff;
        smu_debug = (fw_info.ver >> 0) & 0xff;
-       seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n",
-                  fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug);
+       seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
+                  fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);
  
        /* SDMA */
        query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
        seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
                   fw_info.feature, fw_info.ver);
  
+       /* CAP */
+       if (adev->psp.cap_fw) {
+               query_fw.fw_type = AMDGPU_INFO_FW_CAP;
+               ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+               if (ret)
+                       return ret;
+               seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
+                               fw_info.feature, fw_info.ver);
+       }
        seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
  
        return 0;
index 8f47c14ecbc7ee833237fc3a87a7213acfed4c63,5934326b9db39f3a6869bf3a9799c0f8e9baff73..a9c133a09be5643917901dcad5cbcd5f40c3a149
@@@ -75,6 -75,13 +75,13 @@@ const char *ras_mca_block_string[] = 
        "mca_iohc",
  };
  
+ struct amdgpu_ras_block_list {
+       /* ras block link */
+       struct list_head node;
+       struct amdgpu_ras_block_object *ras_obj;
+ };
  const char *get_ras_block_str(struct ras_common_if *ras_block)
  {
        if (!ras_block)
@@@ -89,6 -96,9 +96,9 @@@
        return ras_block_string[ras_block->block];
  }
  
+ #define ras_block_str(_BLOCK_) \
+       (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
  #define ras_err_str(i) (ras_error_string[ffs(i)])
  
  #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@@ -155,14 -165,9 +165,9 @@@ static int amdgpu_reserve_page_direct(s
        }
  
        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
-       err_rec.address = address;
-       err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
-       err_rec.ts = (uint64_t)ktime_get_real_seconds();
-       err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
        err_data.err_addr = &err_rec;
-       err_data.err_addr_cnt = 1;
+       amdgpu_umc_fill_error_record(&err_data, address,
+                       (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
  
        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@@ -452,7 -457,7 +457,7 @@@ static ssize_t amdgpu_ras_debugfs_ctrl_
        }
  
        if (ret)
-               return -EINVAL;
+               return ret;
  
        return size;
  }
@@@ -866,30 -871,47 +871,47 @@@ static int amdgpu_ras_enable_all_featur
  }
  /* feature ctl end */
  
+ static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
+               enum amdgpu_ras_block block)
+ {
+       if (!block_obj)
+               return -EINVAL;
+       if (block_obj->block == block)
+               return 0;
  
- static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
-                                             struct ras_common_if *ras_block,
-                                             struct ras_err_data  *err_data)
+       return -EINVAL;
+ }
+ static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
+                                       enum amdgpu_ras_block block, uint32_t sub_block_index)
  {
-       switch (ras_block->sub_block_index) {
-       case AMDGPU_RAS_MCA_BLOCK__MP0:
-               if (adev->mca.mp0.ras_funcs &&
-                   adev->mca.mp0.ras_funcs->query_ras_error_count)
-                       adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_MCA_BLOCK__MP1:
-               if (adev->mca.mp1.ras_funcs &&
-                   adev->mca.mp1.ras_funcs->query_ras_error_count)
-                       adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_MCA_BLOCK__MPIO:
-               if (adev->mca.mpio.ras_funcs &&
-                   adev->mca.mpio.ras_funcs->query_ras_error_count)
-                       adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       default:
-               break;
+       struct amdgpu_ras_block_list *node, *tmp;
+       struct amdgpu_ras_block_object *obj;
+       if (block >= AMDGPU_RAS_BLOCK__LAST)
+               return NULL;
+       if (!amdgpu_ras_is_supported(adev, block))
+               return NULL;
+       list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
+               if (!node->ras_obj) {
+                       dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
+                       continue;
+               }
+               obj = node->ras_obj;
+               if (obj->ras_block_match) {
+                       if (obj->ras_block_match(obj, block, sub_block_index) == 0)
+                               return obj;
+               } else {
+                       if (amdgpu_ras_block_match_default(obj, block) == 0)
+                               return obj;
+               }
        }
+       return NULL;
  }
  
  static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
         * choosing right query method according to
         * whether smu support query error information
         */
-       ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc));
+       ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
-               if (adev->umc.ras_funcs &&
-                       adev->umc.ras_funcs->query_ras_error_count)
-                       adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
+               if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
  
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
-               if (adev->umc.ras_funcs &&
-                   adev->umc.ras_funcs->query_ras_error_address)
-                       adev->umc.ras_funcs->query_ras_error_address(adev, err_data);
+               if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+                   adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
-               if (adev->umc.ras_funcs &&
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_count)
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data);
+               if (adev->umc.ras &&
+                       adev->umc.ras->ecc_info_query_ras_error_count)
+                       adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
  
-               if (adev->umc.ras_funcs &&
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_address)
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data);
+               if (adev->umc.ras &&
+                       adev->umc.ras->ecc_info_query_ras_error_address)
+                       adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
  }
  
  int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                                  struct ras_query_if *info)
  {
+       struct amdgpu_ras_block_object *block_obj = NULL;
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data = {0, 0, 0, NULL};
-       int i;
  
        if (!obj)
                return -EINVAL;
  
-       switch (info->head.block) {
-       case AMDGPU_RAS_BLOCK__UMC:
+       if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
                amdgpu_ras_get_ecc_info(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__SDMA:
-               if (adev->sdma.funcs->query_ras_error_count) {
-                       for (i = 0; i < adev->sdma.num_instances; i++)
-                               adev->sdma.funcs->query_ras_error_count(adev, i,
-                                                                       &err_data);
+       } else {
+               block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
+               if (!block_obj || !block_obj->hw_ops)   {
+                       dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+                                    get_ras_block_str(&info->head));
+                       return -EINVAL;
                }
-               break;
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_count)
-                       adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_status)
-                       adev->gfx.ras_funcs->query_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->query_ras_error_count)
-                       adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->query_ras_error_status)
-                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__PCIE_BIF:
-               if (adev->nbio.ras_funcs &&
-                   adev->nbio.ras_funcs->query_ras_error_count)
-                       adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-               if (adev->gmc.xgmi.ras_funcs &&
-                   adev->gmc.xgmi.ras_funcs->query_ras_error_count)
-                       adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__HDP:
-               if (adev->hdp.ras_funcs &&
-                   adev->hdp.ras_funcs->query_ras_error_count)
-                       adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__MCA:
-               amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
-               break;
-       default:
-               break;
+               if (block_obj->hw_ops->query_ras_error_count)
+                       block_obj->hw_ops->query_ras_error_count(adev, &err_data);
+               if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
+                   (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
+                   (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
+                               if (block_obj->hw_ops->query_ras_error_status)
+                                       block_obj->hw_ops->query_ras_error_status(adev);
+                       }
        }
  
        obj->err_data.ue_count += err_data.ue_count;
  int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
  {
+       struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
        if (!amdgpu_ras_is_supported(adev, block))
                return -EINVAL;
  
-       switch (block) {
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->reset_ras_error_count)
-                       adev->gfx.ras_funcs->reset_ras_error_count(adev);
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->reset_ras_error_status)
-                       adev->gfx.ras_funcs->reset_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->reset_ras_error_count)
-                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->reset_ras_error_status)
-                       adev->mmhub.ras_funcs->reset_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__SDMA:
-               if (adev->sdma.funcs->reset_ras_error_count)
-                       adev->sdma.funcs->reset_ras_error_count(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__HDP:
-               if (adev->hdp.ras_funcs &&
-                   adev->hdp.ras_funcs->reset_ras_error_count)
-                       adev->hdp.ras_funcs->reset_ras_error_count(adev);
-               break;
-       default:
-               break;
+       if (!block_obj || !block_obj->hw_ops)   {
+               dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+                            ras_block_str(block));
+               return -EINVAL;
        }
  
-       return 0;
- }
- /* Trigger XGMI/WAFL error */
- static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
-                                struct ta_ras_trigger_error_input *block_info)
- {
-       int ret;
+       if (block_obj->hw_ops->reset_ras_error_count)
+               block_obj->hw_ops->reset_ras_error_count(adev);
  
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
-               dev_warn(adev->dev, "Failed to disallow df cstate");
-       if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
-               dev_warn(adev->dev, "Failed to disallow XGMI power down");
-       ret = psp_ras_trigger_error(&adev->psp, block_info);
-       if (amdgpu_ras_intr_triggered())
-               return ret;
-       if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
-               dev_warn(adev->dev, "Failed to allow XGMI power down");
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
-               dev_warn(adev->dev, "Failed to allow df cstate");
+       if ((block == AMDGPU_RAS_BLOCK__GFX) ||
+           (block == AMDGPU_RAS_BLOCK__MMHUB)) {
+               if (block_obj->hw_ops->reset_ras_error_status)
+                       block_obj->hw_ops->reset_ras_error_status(adev);
+       }
  
-       return ret;
+       return 0;
  }
  
  /* wrapper of psp_ras_trigger_error */
@@@ -1116,11 -1067,20 +1067,20 @@@ int amdgpu_ras_error_inject(struct amdg
                .address = info->address,
                .value = info->value,
        };
-       int ret = 0;
+       int ret = -EINVAL;
+       struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
+                                                       info->head.block,
+                                                       info->head.sub_block_index);
  
        if (!obj)
                return -EINVAL;
  
+       if (!block_obj || !block_obj->hw_ops)   {
+               dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+                            get_ras_block_str(&info->head));
+               return -EINVAL;
+       }
        /* Calculate XGMI relative offset */
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                block_info.address =
                                                          block_info.address);
        }
  
-       switch (info->head.block) {
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->ras_error_inject)
-                       ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
-               else
-                       ret = -EINVAL;
-               break;
-       case AMDGPU_RAS_BLOCK__UMC:
-       case AMDGPU_RAS_BLOCK__SDMA:
-       case AMDGPU_RAS_BLOCK__MMHUB:
-       case AMDGPU_RAS_BLOCK__PCIE_BIF:
-       case AMDGPU_RAS_BLOCK__MCA:
-               ret = psp_ras_trigger_error(&adev->psp, &block_info);
-               break;
-       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-               ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
-               break;
-       default:
-               dev_info(adev->dev, "%s error injection is not supported yet\n",
-                        get_ras_block_str(&info->head));
-               ret = -EINVAL;
+       if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
+               if (block_obj->hw_ops->ras_error_inject)
+                       ret = block_obj->hw_ops->ras_error_inject(adev, info);
+       } else {
+               /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */
+               if (block_obj->hw_ops->ras_error_inject)
+                       ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
+               else  /*If not defined .ras_error_inject, use default ras_error_inject*/
+                       ret = psp_ras_trigger_error(&adev->psp, &block_info);
        }
  
        if (ret)
@@@ -1766,24 -1713,28 +1713,28 @@@ static void amdgpu_ras_log_on_err_count
  static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
                                          struct ras_query_if *info)
  {
+       struct amdgpu_ras_block_object *block_obj;
        /*
         * Only two block need to query read/write
         * RspStatus at current state
         */
-       switch (info->head.block) {
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_status)
-                       adev->gfx.ras_funcs->query_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->query_ras_error_status)
-                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
-               break;
-       default:
-               break;
+       if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
+               (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
+               return;
+       block_obj = amdgpu_ras_get_ras_block(adev,
+                                       info->head.block,
+                                       info->head.sub_block_index);
+       if (!block_obj || !block_obj->hw_ops) {
+               dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+                            get_ras_block_str(&info->head));
+               return;
        }
+       if (block_obj->hw_ops->query_ras_error_status)
+               block_obj->hw_ops->query_ras_error_status(adev);
  }
  
  static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
@@@ -2141,8 -2092,7 +2092,7 @@@ int amdgpu_ras_recovery_init(struct amd
                if (ret)
                        goto free;
  
-               if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
-                       adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+               amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
        }
  
  #ifdef CONFIG_X86_MCE_AMD
@@@ -2348,24 -2298,26 +2298,26 @@@ int amdgpu_ras_init(struct amdgpu_devic
        case CHIP_VEGA20:
        case CHIP_ARCTURUS:
        case CHIP_ALDEBARAN:
-               if (!adev->gmc.xgmi.connected_to_cpu)
-                       adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+               if (!adev->gmc.xgmi.connected_to_cpu) {
+                       adev->nbio.ras = &nbio_v7_4_ras;
+                       amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
+               }
                break;
        default:
                /* nbio ras is not available */
                break;
        }
  
-       if (adev->nbio.ras_funcs &&
-           adev->nbio.ras_funcs->init_ras_controller_interrupt) {
-               r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
+       if (adev->nbio.ras &&
+           adev->nbio.ras->init_ras_controller_interrupt) {
+               r = adev->nbio.ras->init_ras_controller_interrupt(adev);
                if (r)
                        goto release_con;
        }
  
-       if (adev->nbio.ras_funcs &&
-           adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
-               r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
+       if (adev->nbio.ras &&
+           adev->nbio.ras->init_ras_err_event_athub_interrupt) {
+               r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
                if (r)
                        goto release_con;
        }
        }
        else if (adev->df.funcs &&
            adev->df.funcs->query_ras_poison_mode &&
-           adev->umc.ras_funcs &&
-           adev->umc.ras_funcs->query_ras_poison_mode) {
+           adev->umc.ras &&
+           adev->umc.ras->query_ras_poison_mode) {
                df_poison =
                        adev->df.funcs->query_ras_poison_mode(adev);
                umc_poison =
-                       adev->umc.ras_funcs->query_ras_poison_mode(adev);
+                       adev->umc.ras->query_ras_poison_mode(adev);
                /* Only poison is set in both DF and UMC, we can support it */
                if (df_poison && umc_poison)
                        con->poison_supported = true;
@@@ -2585,6 -2537,7 +2537,7 @@@ int amdgpu_ras_pre_fini(struct amdgpu_d
  
  int amdgpu_ras_fini(struct amdgpu_device *adev)
  {
+       struct amdgpu_ras_block_list *ras_node, *tmp;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
  
        if (!adev->ras_enabled || !con)
        amdgpu_ras_set_context(adev, NULL);
        kfree(con);
  
+       /* Clear ras blocks from ras_list and free ras block list node */
+       list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
+               list_del(&ras_node->node);
+               kfree(ras_node);
+       }
        return 0;
  }
  
@@@ -2685,7 -2644,7 +2644,7 @@@ static int amdgpu_bad_page_notifier(str
         * and error occurred in DramECC (Extended error code = 0) then only
         * process the error, else bail out.
         */
 -      if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
 +      if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
                    (XEC(m->status, 0x3f) == 0x0)))
                return NOTIFY_DONE;
  
        dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
                             umc_inst, ch_inst);
  
-       memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        /*
         * Translate UMC channel address to Physical address
         */
                        ADDR_OF_256B_BLOCK(channel_index) |
                        OFFSET_IN_256B_BLOCK(m->addr);
  
-       err_rec.address = m->addr;
-       err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-       err_rec.ts = (uint64_t)ktime_get_real_seconds();
-       err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-       err_rec.cu = 0;
-       err_rec.mem_channel = channel_index;
-       err_rec.mcumc_id = umc_inst;
+       memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
-       err_data.err_addr_cnt = 1;
+       amdgpu_umc_fill_error_record(&err_data, m->addr,
+                       retired_page, channel_index, umc_inst);
  
        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@@ -2777,3 -2728,63 +2728,63 @@@ static void amdgpu_register_bad_pages_m
        }
  }
  #endif
+
+ struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
+ {
+       if (!adev)
+               return NULL;
+
+       return adev->psp.ras_context.ras;
+ }
+
+ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
+ {
+       if (!adev)
+               return -EINVAL;
+
+       adev->psp.ras_context.ras = ras_con;
+       return 0;
+ }
+
+ /* check if ras is supported on block, say, sdma, gfx */
+ int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+               unsigned int block)
+ {
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       if (block >= AMDGPU_RAS_BLOCK_COUNT)
+               return 0;
+       return ras && (adev->ras_enabled & (1 << block));
+ }
+
+ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
+ {
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+               schedule_work(&ras->recovery_work);
+       return 0;
+ }
+
+ /* Register each ip ras block into amdgpu ras */
+ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+               struct amdgpu_ras_block_object *ras_block_obj)
+ {
+       struct amdgpu_ras_block_list *ras_node;
+
+       if (!adev || !ras_block_obj)
+               return -EINVAL;
+
+       if (!amdgpu_ras_asic_supported(adev))
+               return 0;
+
+       ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
+       if (!ras_node)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&ras_node->node);
+       ras_node->ras_obj = ras_block_obj;
+       list_add_tail(&ras_node->node, &adev->ras_list);
+       return 0;
+ }
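
A note on the registration path just added: each IP block hands amdgpu an amdgpu_ras_block_object, the register call wraps it in a list node and appends it to adev->ras_list, and amdgpu_ras_fini() earlier in this file walks the same list with list_for_each_entry_safe() to free every node. The snippet below is a stripped-down, standalone userspace sketch of that register/teardown pattern; the structure names and the simple singly linked list are ours for illustration, not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-ins for the driver structures (illustrative only). */
    struct ras_block_object { const char *name; };

    struct ras_block_node {
        struct ras_block_object *obj;
        struct ras_block_node *next;
    };

    /* Append a block to the tail of the list, like the register call. */
    static int register_block(struct ras_block_node **head,
                              struct ras_block_object *obj)
    {
        struct ras_block_node **pos = head;
        struct ras_block_node *node = calloc(1, sizeof(*node));

        if (!node)
            return -1;
        node->obj = obj;
        while (*pos)
            pos = &(*pos)->next;
        *pos = node;
        return 0;
    }

    /* Teardown walks the list and frees every node, like the fini path. */
    static void release_blocks(struct ras_block_node **head)
    {
        struct ras_block_node *node = *head, *next;

        while (node) {
            next = node->next;
            printf("releasing %s\n", node->obj->name);
            free(node);
            node = next;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct ras_block_node *list = NULL;
        struct ras_block_object umc = { "umc" }, gfx = { "gfx" };

        register_block(&list, &umc);
        register_block(&list, &gfx);
        release_blocks(&list);
        return 0;
    }
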
index d178fbec70489523711d0c9f940ed7483d747df6,41d6f604813ddfef867ab4cfeec9a55e6eb17bb5..414a22dddc78d03d66729cf08ed2cb4ae5baad5c
@@@ -50,6 -50,7 +50,7 @@@
  #include <drm/ttm/ttm_range_manager.h>
  
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_drv.h>
  
  #include "amdgpu.h"
  #include "amdgpu_object.h"
@@@ -170,10 -171,10 +171,10 @@@ static void amdgpu_evict_flags(struct t
   * @bo: buffer object to map
   * @mem: memory object to map
   * @mm_cur: range to map
-  * @num_pages: number of pages to map
   * @window: which GART window to use
   * @ring: DMA ring to use for the copy
   * @tmz: if we should setup a TMZ enabled mapping
+  * @size: in: number of bytes to map; out: number of bytes mapped
   * @addr: resulting address inside the MC address space
   *
   * Setup one of the GART windows to access a specific piece of memory or return
  static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                                 struct ttm_resource *mem,
                                 struct amdgpu_res_cursor *mm_cur,
-                                unsigned num_pages, unsigned window,
-                                struct amdgpu_ring *ring, bool tmz,
-                                uint64_t *addr)
+                                unsigned window, struct amdgpu_ring *ring,
+                                bool tmz, uint64_t *size, uint64_t *addr)
  {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_job *job;
-       unsigned num_dw, num_bytes;
-       struct dma_fence *fence;
+       unsigned offset, num_pages, num_dw, num_bytes;
        uint64_t src_addr, dst_addr;
+       struct dma_fence *fence;
+       struct amdgpu_job *job;
        void *cpu_addr;
        uint64_t flags;
        unsigned int i;
  
        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-       BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
+       if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
+               return -EINVAL;
  
        /* Map only what can't be accessed directly */
        if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
                return 0;
        }
  
+       /*
+        * If start begins at an offset inside the page, then adjust the size
+        * and addr accordingly
+        */
+       offset = mm_cur->start & ~PAGE_MASK;
+       num_pages = PFN_UP(*size + offset);
+       num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
+       *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
        *addr = adev->gmc.gart_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;
-       *addr += mm_cur->start & ~PAGE_MASK;
+       *addr += offset;
  
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
                dma_addr_t *dma_addr;
  
                dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
-               r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
-                                   cpu_addr);
-               if (r)
-                       goto error_free;
+               amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
        } else {
                dma_addr_t dma_address;
  
                dma_address += adev->vm_manager.vram_base_offset;
  
                for (i = 0; i < num_pages; ++i) {
-                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
-                                           &dma_address, flags, cpu_addr);
-                       if (r)
-                               goto error_free;
+                       amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
+                                       flags, cpu_addr);
                        dma_address += PAGE_SIZE;
                }
        }
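
The size negotiation added to amdgpu_ttm_map_buffer() works in page units: take the sub-page offset of the cursor, round the requested byte count up to whole pages, cap that at the GART window size, then shrink *size to the bytes those pages can actually cover past the offset. A standalone sketch of the arithmetic, with made-up page and window constants standing in for PAGE_SIZE and AMDGPU_GTT_MAX_TRANSFER_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE    4096ULL
    /* Stand-in for AMDGPU_GTT_MAX_TRANSFER_SIZE (pages per GART window). */
    #define SKETCH_WINDOW_PAGES 512ULL

    /* Clamp a byte count to what one window can map from 'start'. */
    static uint64_t clamp_to_window(uint64_t start, uint64_t size)
    {
        uint64_t offset = start & (SKETCH_PAGE_SIZE - 1);  /* sub-page offset */
        uint64_t pages = (size + offset + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;

        if (pages > SKETCH_WINDOW_PAGES)
            pages = SKETCH_WINDOW_PAGES;

        /* Usable bytes: the mapped pages minus the leading offset. */
        if (size > pages * SKETCH_PAGE_SIZE - offset)
            size = pages * SKETCH_PAGE_SIZE - offset;
        return size;
    }

    int main(void)
    {
        /* 8 MiB starting mid-page gets clamped to just under 2 MiB. */
        printf("%llu\n", (unsigned long long)clamp_to_window(0x1234, 8ULL << 20));
        return 0;
    }
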
@@@ -297,9 -305,6 +305,6 @@@ int amdgpu_ttm_copy_mem_to_mem(struct a
                               struct dma_resv *resv,
                               struct dma_fence **f)
  {
-       const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
-                                       AMDGPU_GPU_PAGE_SIZE);
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct amdgpu_res_cursor src_mm, dst_mm;
        struct dma_fence *fence = NULL;
  
        mutex_lock(&adev->mman.gtt_window_lock);
        while (src_mm.remaining) {
-               uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
-               uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
+               uint64_t from, to, cur_size;
                struct dma_fence *next;
-               uint32_t cur_size;
-               uint64_t from, to;
  
-               /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
-                * begins at an offset, then adjust the size accordingly
-                */
-               cur_size = max(src_page_offset, dst_page_offset);
-               cur_size = min(min3(src_mm.size, dst_mm.size, size),
-                              (uint64_t)(GTT_MAX_BYTES - cur_size));
+               /* Never copy more than 256MiB at once to avoid a timeout */
+               cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
  
                /* Map src to window 0 and dst to window 1. */
                r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
-                                         PFN_UP(cur_size + src_page_offset),
-                                         0, ring, tmz, &from);
+                                         0, ring, tmz, &cur_size, &from);
                if (r)
                        goto error;
  
                r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
-                                         PFN_UP(cur_size + dst_page_offset),
-                                         1, ring, tmz, &to);
+                                         1, ring, tmz, &cur_size, &to);
                if (r)
                        goto error;
  
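
With amdgpu_ttm_map_buffer() now trimming the size itself, the copy loop above only applies a coarse per-iteration cap (256 MiB, to stay well under the job timeout) and lets the mapping step shrink cur_size further if a window cannot hold it; the reworked amdgpu_fill_buffer() later in this file uses the same pattern. As a plain illustration of that chunking loop (the callback and names here are invented, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_CAP (256ULL << 20)   /* cap each submission at 256 MiB */

    /* Stand-in for map + submit of one copy; returns bytes actually handled.
     * A real implementation may shrink 'size' further (window limits). */
    static uint64_t submit_copy(uint64_t offset, uint64_t size)
    {
        printf("copy %llu bytes at offset %llu\n",
               (unsigned long long)size, (unsigned long long)offset);
        return size;
    }

    static void copy_chunked(uint64_t total)
    {
        uint64_t done = 0;

        while (done < total) {
            uint64_t cur = total - done;

            if (cur > CHUNK_CAP)
                cur = CHUNK_CAP;
            done += submit_copy(done, cur);
        }
    }

    int main(void)
    {
        copy_chunked(600ULL << 20);   /* 600 MiB ends up as three submissions */
        return 0;
    }
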
@@@ -396,8 -392,7 +392,7 @@@ static int amdgpu_move_blit(struct ttm_
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
  
-               r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
-                                      NULL, &wipe_fence);
+               r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
@@@ -821,14 -816,13 +816,13 @@@ static void amdgpu_ttm_tt_unpin_userptr
  #endif
  }
  
- static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
-                               struct ttm_buffer_object *tbo,
-                               uint64_t flags)
+ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+                                struct ttm_buffer_object *tbo,
+                                uint64_t flags)
  {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
        struct ttm_tt *ttm = tbo->ttm;
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
  
        if (amdgpu_bo_encrypted(abo))
                flags |= AMDGPU_PTE_TMZ;
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
  
-               r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
-                               gtt->ttm.dma_address, flags);
-               if (r)
-                       goto gart_bind_fail;
+               amdgpu_gart_bind(adev, gtt->offset, page_idx,
+                                gtt->ttm.dma_address, flags);
  
                /* The memory type of the first page defaults to UC. Now
                 * modify the memory type to NC from the second page of
                flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
                flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
  
-               r = amdgpu_gart_bind(adev,
-                               gtt->offset + (page_idx << PAGE_SHIFT),
-                               ttm->num_pages - page_idx,
-                               &(gtt->ttm.dma_address[page_idx]), flags);
+               amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
+                                ttm->num_pages - page_idx,
+                                &(gtt->ttm.dma_address[page_idx]), flags);
        } else {
-               r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-                                    gtt->ttm.dma_address, flags);
+               amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                                gtt->ttm.dma_address, flags);
        }
- gart_bind_fail:
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
-       return r;
  }
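
For the CP_MQD_GFX9 case above, the buffer is bound in two steps: page 0 keeps the default UC memory type, and the remaining pages are rebound with the NC type patched into the PTE flags. Roughly, and with invented flag values and a fake bind callback rather than amdgpu_gart_bind():

    #include <stdint.h>
    #include <stdio.h>

    /* Invented flag layout; only the memory-type bits matter for the sketch. */
    #define SKETCH_MTYPE_MASK 0x3ULL
    #define SKETCH_MTYPE_UC   0x0ULL
    #define SKETCH_MTYPE_NC   0x1ULL

    /* Stand-in for the GART bind: maps 'count' pages starting at 'first'. */
    static void bind_pages(uint64_t first, uint64_t count, uint64_t flags)
    {
        printf("bind pages [%llu, %llu) with flags 0x%llx\n",
               (unsigned long long)first,
               (unsigned long long)(first + count),
               (unsigned long long)flags);
    }

    static void bind_mqd_style(uint64_t num_pages, uint64_t flags)
    {
        /* Page 0 keeps the default memory type (UC in the driver's case). */
        bind_pages(0, 1, flags);

        /* The rest of the buffer is switched to NC before binding. */
        flags &= ~SKETCH_MTYPE_MASK;
        flags |= SKETCH_MTYPE_NC;
        bind_pages(1, num_pages - 1, flags);
    }

    int main(void)
    {
        bind_mqd_style(8, SKETCH_MTYPE_UC);
        return 0;
    }
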
  
  /*
@@@ -878,7 -862,7 +862,7 @@@ static int amdgpu_ttm_backend_bind(stru
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        uint64_t flags;
-       int r = 0;
+       int r;
  
        if (!bo_mem)
                return -EINVAL;
  
        /* bind pages into GART page tables */
        gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-       r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-               gtt->ttm.dma_address, flags);
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
+       amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                        gtt->ttm.dma_address, flags);
        gtt->bound = true;
-       return r;
+       return 0;
  }
  
  /*
@@@ -982,12 -962,7 +962,7 @@@ int amdgpu_ttm_alloc_gart(struct ttm_bu
  
        /* Bind pages */
        gtt->offset = (u64)tmp->start << PAGE_SHIFT;
-       r = amdgpu_ttm_gart_bind(adev, bo, flags);
-       if (unlikely(r)) {
-               ttm_resource_free(bo, &tmp);
-               return r;
-       }
+       amdgpu_ttm_gart_bind(adev, bo, flags);
        amdgpu_gart_invalidate_tlb(adev);
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, tmp);
   * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
   * rebind GTT pages during a GPU reset.
   */
- int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        uint64_t flags;
-       int r;
  
        if (!tbo->ttm)
-               return 0;
+               return;
  
        flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
-       r = amdgpu_ttm_gart_bind(adev, tbo, flags);
-       return r;
+       amdgpu_ttm_gart_bind(adev, tbo, flags);
  }
  
  /*
@@@ -1027,7 -999,6 +999,6 @@@ static void amdgpu_ttm_backend_unbind(s
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
  
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr) {
                return;
  
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-       r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
-       if (r)
-               DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-                         gtt->ttm.num_pages, gtt->offset);
+       amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        gtt->bound = false;
  }
  
@@@ -1168,6 -1136,26 +1136,26 @@@ static void amdgpu_ttm_tt_unpopulate(st
        return ttm_pool_free(&adev->mman.bdev.pool, ttm);
  }
  
+ /**
+  * amdgpu_ttm_tt_get_userptr - Return the userptr of the GTT ttm_tt for the
+  * current task
+  *
+  * @tbo: The ttm_buffer_object that contains the userptr
+  * @user_addr: The returned userptr address
+  */
+ int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+                             uint64_t *user_addr)
+ {
+       struct amdgpu_ttm_tt *gtt;
+
+       if (!tbo->ttm)
+               return -EINVAL;
+
+       gtt = (void *)tbo->ttm;
+       *user_addr = gtt->userptr;
+       return 0;
+ }
+
  /**
   * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
   * task
@@@ -1433,6 -1421,63 +1421,63 @@@ static void amdgpu_ttm_vram_mm_access(s
        }
  }
  
+ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
+                                       unsigned long offset, void *buf, int len, int write)
+ {
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+       struct amdgpu_res_cursor src_mm;
+       struct amdgpu_job *job;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       unsigned int num_dw;
+       int r, idx;
+
+       if (len != PAGE_SIZE)
+               return -EINVAL;
+       if (!adev->mman.sdma_access_ptr)
+               return -EACCES;
+       if (!drm_dev_enter(adev_to_drm(adev), &idx))
+               return -ENODEV;
+       if (write)
+               memcpy(adev->mman.sdma_access_ptr, buf, len);
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+       if (r)
+               goto out;
+       amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
+       src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+       dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
+       if (write)
+               swap(src_addr, dst_addr);
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+       amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+       r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r) {
+               amdgpu_job_free(job);
+               goto out;
+       }
+       if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+               r = -ETIMEDOUT;
+       dma_fence_put(fence);
+       if (!(r || write))
+               memcpy(buf, adev->mman.sdma_access_ptr, len);
+ out:
+       drm_dev_exit(idx);
+       return r;
+ }
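
The debug access path added here bounces every request through a single GTT page (sdma_access_bo): on a write the caller's data is staged into the bounce buffer and the SDMA job copies bounce to VRAM, on a read the job copies VRAM to the bounce buffer and the data is copied back out; the swap(src_addr, dst_addr) on the write path is what reverses the direction of the same copy job. A standalone userspace sketch of that bounce-buffer flow, with memcpy standing in for the SDMA copy:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SKETCH_PAGE 4096

    static uint8_t vram[4 * SKETCH_PAGE];  /* pretend VRAM */
    static uint8_t bounce[SKETCH_PAGE];    /* pretend single-page GTT bounce buffer */

    /* Stand-in for the SDMA copy job; here it is just a CPU memcpy. */
    static void dma_copy(uint8_t *dst, const uint8_t *src, size_t len)
    {
        memcpy(dst, src, len);
    }

    /* One page per call, mirroring the len != PAGE_SIZE check in the driver. */
    static int access_vram(size_t offset, void *buf, size_t len, int write)
    {
        if (len != SKETCH_PAGE || offset + len > sizeof(vram))
            return -1;

        if (write) {
            memcpy(bounce, buf, len);             /* stage caller data */
            dma_copy(vram + offset, bounce, len);
        } else {
            dma_copy(bounce, vram + offset, len);
            memcpy(buf, bounce, len);             /* hand staged data back */
        }
        return 0;
    }

    int main(void)
    {
        uint8_t page[SKETCH_PAGE] = { 0xab };
        uint8_t check[SKETCH_PAGE] = { 0 };

        access_vram(SKETCH_PAGE, page, sizeof(page), 1);
        access_vram(SKETCH_PAGE, check, sizeof(check), 0);
        printf("first byte read back: 0x%02x\n", check[0]);
        return 0;
    }
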
  /**
   * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
   *
@@@ -1457,6 -1502,10 +1502,10 @@@ static int amdgpu_ttm_access_memory(str
        if (bo->resource->mem_type != TTM_PL_VRAM)
                return -EIO;
  
+       if (amdgpu_device_has_timeouts_enabled(adev) &&
+                       !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
+               return len;
        amdgpu_res_first(bo->resource, offset, len, &cursor);
        while (cursor.remaining) {
                size_t count, size = cursor.size;
@@@ -1797,6 -1846,12 +1846,12 @@@ int amdgpu_ttm_init(struct amdgpu_devic
                return r;
        }
  
+       if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_GTT,
+                               &adev->mman.sdma_access_bo, NULL,
+                               &adev->mman.sdma_access_ptr))
+               DRM_WARN("Debug VRAM access will use slowpath MM access\n");
        return 0;
  }
  
@@@ -1818,6 -1873,8 +1873,8 @@@ void amdgpu_ttm_fini(struct amdgpu_devi
        if (adev->mman.stolen_reserved_size)
                amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
                                      NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
+                                       &adev->mman.sdma_access_ptr);
        amdgpu_ttm_fw_reserve_vram_fini(adev);
  
        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@@ -1888,23 -1945,55 +1945,55 @@@ void amdgpu_ttm_set_buffer_funcs_status
        adev->mman.buffer_funcs_enabled = enable;
  }
  
+ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
+                                 bool direct_submit,
+                                 unsigned int num_dw,
+                                 struct dma_resv *resv,
+                                 bool vm_needs_flush,
+                                 struct amdgpu_job **job)
+ {
+       enum amdgpu_ib_pool_type pool = direct_submit ?
+               AMDGPU_IB_POOL_DIRECT :
+               AMDGPU_IB_POOL_DELAYED;
+       int r;
+
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+       if (r)
+               return r;
+
+       if (vm_needs_flush) {
+               (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+                                                       adev->gmc.pdb0_bo :
+                                                       adev->gart.bo);
+               (*job)->vm_needs_flush = true;
+       }
+       if (resv) {
+               r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
+               if (r) {
+                       DRM_ERROR("sync failed (%d).\n", r);
+                       amdgpu_job_free(*job);
+                       return r;
+               }
+       }
+       return 0;
+ }
+
  int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush, bool tmz)
  {
-       enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
-               AMDGPU_IB_POOL_DELAYED;
        struct amdgpu_device *adev = ring->adev;
+       unsigned num_loops, num_dw;
        struct amdgpu_job *job;
        uint32_t max_bytes;
-       unsigned num_loops, num_dw;
        unsigned i;
        int r;
  
-       if (direct_submit && !ring->sched.ready) {
+       if (!direct_submit && !ring->sched.ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
+       r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
+                                  resv, vm_needs_flush, &job);
        if (r)
                return r;
  
-       if (vm_needs_flush) {
-               job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
-                                       adev->gmc.pdb0_bo : adev->gart.bo);
-               job->vm_needs_flush = true;
-       }
-       if (resv) {
-               r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_SYNC_ALWAYS,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
-               if (r) {
-                       DRM_ERROR("sync failed (%d).\n", r);
-                       goto error_free;
-               }
-       }
        for (i = 0; i < num_loops; i++) {
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
  
@@@ -1961,77 -2035,35 +2035,35 @@@ error_free
        return r;
  }
  
- int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-                      uint32_t src_data,
-                      struct dma_resv *resv,
-                      struct dma_fence **fence)
+ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
+                              uint64_t dst_addr, uint32_t byte_count,
+                              struct dma_resv *resv,
+                              struct dma_fence **fence,
+                              bool vm_needs_flush)
  {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
-       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-       struct amdgpu_res_cursor cursor;
+       struct amdgpu_device *adev = ring->adev;
        unsigned int num_loops, num_dw;
-       uint64_t num_bytes;
        struct amdgpu_job *job;
+       uint32_t max_bytes;
+       unsigned int i;
        int r;
  
-       if (!adev->mman.buffer_funcs_enabled) {
-               DRM_ERROR("Trying to clear memory with ring turned off.\n");
-               return -EINVAL;
-       }
-       if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
-               DRM_ERROR("Trying to clear preemptible memory.\n");
-               return -EINVAL;
-       }
-       if (bo->tbo.resource->mem_type == TTM_PL_TT) {
-               r = amdgpu_ttm_alloc_gart(&bo->tbo);
-               if (r)
-                       return r;
-       }
-       num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
-       num_loops = 0;
-       amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
-       while (cursor.remaining) {
-               num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
-               amdgpu_res_next(&cursor, cursor.size);
-       }
-       num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
-       /* for IB padding */
-       num_dw += 64;
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
-                                    &job);
+       max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+       num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
+       num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
+       r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
+                                  &job);
        if (r)
                return r;
  
-       if (resv) {
-               r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_SYNC_ALWAYS,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
-               if (r) {
-                       DRM_ERROR("sync failed (%d).\n", r);
-                       goto error_free;
-               }
-       }
-       amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
-       while (cursor.remaining) {
-               uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
-               uint64_t dst_addr = cursor.start;
+       for (i = 0; i < num_loops; i++) {
+               uint32_t cur_size = min(byte_count, max_bytes);
  
-               dst_addr += amdgpu_ttm_domain_start(adev,
-                                                   bo->tbo.resource->mem_type);
                amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
                                        cur_size);
  
-               amdgpu_res_next(&cursor, cur_size);
+               dst_addr += cur_size;
+               byte_count -= cur_size;
        }
  
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@@ -2048,6 -2080,55 +2080,55 @@@ error_free
        return r;
  }
  
+ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+                       uint32_t src_data,
+                       struct dma_resv *resv,
+                       struct dma_fence **f)
+ {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+       struct dma_fence *fence = NULL;
+       struct amdgpu_res_cursor dst;
+       int r;
+
+       if (!adev->mman.buffer_funcs_enabled) {
+               DRM_ERROR("Trying to clear memory with ring turned off.\n");
+               return -EINVAL;
+       }
+
+       amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
+       mutex_lock(&adev->mman.gtt_window_lock);
+       while (dst.remaining) {
+               struct dma_fence *next;
+               uint64_t cur_size, to;
+               /* Never fill more than 256MiB at once to avoid timeouts */
+               cur_size = min(dst.size, 256ULL << 20);
+               r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
+                                         1, ring, false, &cur_size, &to);
+               if (r)
+                       goto error;
+               r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
+                                       &next, true);
+               if (r)
+                       goto error;
+               dma_fence_put(fence);
+               fence = next;
+               amdgpu_res_next(&dst, cur_size);
+       }
+ error:
+       mutex_unlock(&adev->mman.gtt_window_lock);
+       if (f)
+               *f = dma_fence_get(fence);
+       dma_fence_put(fence);
+       return r;
+ }
+
  /**
   * amdgpu_ttm_evict_resources - evict memory buffers
   * @adev: amdgpu device object
@@@ -2087,7 -2168,7 +2168,7 @@@ static int amdgpu_mm_vram_table_show(st
                                                            TTM_PL_VRAM);
        struct drm_printer p = drm_seq_file_printer(m);
  
 -      man->func->debug(man, &p);
 +      ttm_resource_manager_debug(man, &p);
        return 0;
  }
  
@@@ -2105,7 -2186,7 +2186,7 @@@ static int amdgpu_mm_tt_table_show(stru
                                                            TTM_PL_TT);
        struct drm_printer p = drm_seq_file_printer(m);
  
 -      man->func->debug(man, &p);
 +      ttm_resource_manager_debug(man, &p);
        return 0;
  }
  
@@@ -2116,7 -2197,7 +2197,7 @@@ static int amdgpu_mm_gds_table_show(str
                                                            AMDGPU_PL_GDS);
        struct drm_printer p = drm_seq_file_printer(m);
  
 -      man->func->debug(man, &p);
 +      ttm_resource_manager_debug(man, &p);
        return 0;
  }
  
@@@ -2127,7 -2208,7 +2208,7 @@@ static int amdgpu_mm_gws_table_show(str
                                                            AMDGPU_PL_GWS);
        struct drm_printer p = drm_seq_file_printer(m);
  
 -      man->func->debug(man, &p);
 +      ttm_resource_manager_debug(man, &p);
        return 0;
  }
  
@@@ -2138,7 -2219,7 +2219,7 @@@ static int amdgpu_mm_oa_table_show(stru
                                                            AMDGPU_PL_OA);
        struct drm_printer p = drm_seq_file_printer(m);
  
 -      man->func->debug(man, &p);
 +      ttm_resource_manager_debug(man, &p);
        return 0;
  }
  
index 7442095f089c72856c9cde84e36ffe3030fbe59a,6c99ef700cc82d182dc5bab40809e63baf142962..fce9a13a6ba1cdecdb2c5a5f89422ea599557343
@@@ -281,7 -281,7 +281,7 @@@ int amdgpu_vram_mgr_reserve_range(struc
        rsv->mm_node.size = size >> PAGE_SHIFT;
  
        spin_lock(&mgr->lock);
-       list_add_tail(&mgr->reservations_pending, &rsv->node);
+       list_add_tail(&rsv->node, &mgr->reservations_pending);
        amdgpu_vram_mgr_do_reserve(&mgr->manager);
        spin_unlock(&mgr->lock);
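
The one-line fix above swaps two arguments that were in the wrong order: in the kernel list API the new entry comes first and the list head second, so the original call linked the reservations_pending head into the reservation node instead of queueing the node on the pending list. A minimal standalone model of list_add_tail() semantics and the correct call order (our own re-implementation, not the kernel header):

    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    static void init_list_head(struct list_head *head)
    {
        head->prev = head;
        head->next = head;
    }

    /* Same contract as the kernel helper: new node first, list head second. */
    static void list_add_tail_sketch(struct list_head *new_node, struct list_head *head)
    {
        new_node->prev = head->prev;
        new_node->next = head;
        head->prev->next = new_node;
        head->prev = new_node;
    }

    /* node is the first member, so a list_head pointer doubles as a
     * reservation pointer in this simplified container_of stand-in. */
    struct reservation { struct list_head node; int id; };

    int main(void)
    {
        struct list_head pending;
        struct reservation a = { .id = 1 }, b = { .id = 2 };
        struct list_head *it;

        init_list_head(&pending);
        /* Correct order: the entry first, the list second. */
        list_add_tail_sketch(&a.node, &pending);
        list_add_tail_sketch(&b.node, &pending);

        for (it = pending.next; it != &pending; it = it->next)
            printf("reservation %d\n", ((struct reservation *)it)->id);
        return 0;
    }
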
  
@@@ -472,7 -472,6 +472,7 @@@ error_free
        while (i--)
                drm_mm_remove_node(&node->mm_nodes[i]);
        spin_unlock(&mgr->lock);
 +      ttm_resource_fini(man, &node->base);
        kvfree(node);
  
  error_sub:
@@@ -512,7 -511,6 +512,7 @@@ static void amdgpu_vram_mgr_del(struct 
        atomic64_sub(usage, &mgr->usage);
        atomic64_sub(vis_usage, &mgr->vis_usage);
  
 +      ttm_resource_fini(man, res);
        kvfree(node);
  }
  
@@@ -691,8 -689,7 +691,8 @@@ int amdgpu_vram_mgr_init(struct amdgpu_
        struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
        struct ttm_resource_manager *man = &mgr->manager;
  
 -      ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
 +      ttm_resource_manager_init(man, &adev->mman.bdev,
 +                                adev->gmc.real_vram_size >> PAGE_SHIFT);
  
        man->func = &amdgpu_vram_mgr_func;
  
index 526076e4bde35e9de011f9c67c7aeae877e34a18,11298615abfb2d3b3a8773e27f401cfc37f1ad3b..be4852757818d45f193b6ab4228d9e86defb6659
@@@ -76,7 -76,7 +76,7 @@@
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_uapi.h>
  #include <drm/drm_atomic_helper.h>
 -#include <drm/drm_dp_mst_helper.h>
 +#include <drm/dp/drm_dp_mst_helper.h>
  #include <drm/drm_fb_helper.h>
  #include <drm/drm_fourcc.h>
  #include <drm/drm_edid.h>
@@@ -1027,7 -1027,6 +1027,6 @@@ static int dm_dmub_hw_init(struct amdgp
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
-       struct dc *dc = adev->dm.dc;
  
        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];
  
-       switch (adev->asic_type) {
-       case CHIP_YELLOW_CARP:
-               if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
-                       hw_params.dpia_supported = true;
+       switch (adev->ip_versions[DCE_HWIP][0]) {
+       case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+               hw_params.dpia_supported = true;
  #if defined(CONFIG_DRM_AMD_DC_DCN)
-                       hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
+               hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
  #endif
-               }
                break;
        default:
                break;
@@@ -1496,10 -1493,10 +1493,10 @@@ static int amdgpu_dm_init(struct amdgpu
                init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
  #endif
  
-       init_data.flags.power_down_display_on_boot = true;
+       init_data.flags.seamless_boot_edp_requested = false;
  
        if (check_seamless_boot_capability(adev)) {
-               init_data.flags.power_down_display_on_boot = false;
+               init_data.flags.seamless_boot_edp_requested = true;
                init_data.flags.allow_seamless_boot_optimization = true;
                DRM_INFO("Seamless boot condition check passed\n");
        }
@@@ -2179,12 -2176,8 +2176,8 @@@ static void s3_handle_mst(struct drm_de
  
  static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
  {
-       struct smu_context *smu = &adev->smu;
        int ret = 0;
  
-       if (!is_support_sw_smu(adev))
-               return 0;
        /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
         * on window driver dc implementation.
         * For Navi1x, clock settings of dcn watermarks are fixed. the settings
                return 0;
        }
  
-       ret = smu_write_watermarks_table(smu);
+       ret = amdgpu_dpm_write_watermarks_table(adev);
        if (ret) {
                DRM_ERROR("Failed to update WMTABLE!\n");
                return ret;
@@@ -3653,7 -3646,7 +3646,7 @@@ static int dcn10_register_irq_handlers(
  
        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
-                       i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+                       i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
@@@ -5856,7 -5849,7 +5849,7 @@@ static void fill_stream_properties_from
        else if (drm_mode_is_420_also(info, mode_in)
                        && aconnector->force_yuv420_output)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
 -      else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
 +      else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
                        && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
@@@ -6435,8 -6428,7 +6428,7 @@@ create_stream_for_sink(struct amdgpu_dm
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing = amdgpu_freesync_vid_mode &&
-                                is_freesync_video_mode(&mode, aconnector);
+               recalculate_timing = is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        saved_mode = mode;
                        if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
                                stream->use_vsc_sdp_for_colorimetry = true;
                }
-               mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+               mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
                aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
  
        }
@@@ -8143,6 -8135,9 +8135,9 @@@ static void amdgpu_dm_connector_add_com
                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
+               if (!mode)
+                       continue;
                drm_mode_probed_add(connector, mode);
                amdgpu_dm_connector->num_modes++;
        }
@@@ -8304,7 -8299,7 +8299,7 @@@ static void amdgpu_dm_connector_add_fre
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
  
-       if (!(amdgpu_freesync_vid_mode && edid))
+       if (!edid)
                return;
  
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@@ -8371,7 -8366,7 +8366,7 @@@ void amdgpu_dm_connector_init_helper(st
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-               link->link_enc = dp_get_link_enc(link);
+               link->link_enc = link_enc_cfg_get_link_enc(link);
                ASSERT(link->link_enc);
                if (link->link_enc)
                        aconnector->base.ycbcr_420_allowed =
@@@ -10271,8 -10266,7 +10266,7 @@@ static int dm_update_crtc_state(struct 
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
-               if (amdgpu_freesync_vid_mode &&
-                   dm_new_crtc_state->stream &&
+               if (dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;
  
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
  
-               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+               if (dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        set_freesync_fixed_config(dm_new_crtc_state);
  
                        goto skip_modeset;
-               } else if (amdgpu_freesync_vid_mode && aconnector &&
+               } else if (aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;
index bee806ae3e525a05cbda19e1249e45e7546b8671,e35977fda5c11a2d20cb7afc06dd7ccb49db41c0..28a2b9d476b47ede0abc0d5ff21c9fa1de097a57
@@@ -29,7 -29,7 +29,7 @@@
  #include <drm/drm_atomic.h>
  #include <drm/drm_connector.h>
  #include <drm/drm_crtc.h>
 -#include <drm/drm_dp_mst_helper.h>
 +#include <drm/dp/drm_dp_mst_helper.h>
  #include <drm/drm_plane.h>
  
  /*
@@@ -604,6 -604,7 +604,7 @@@ struct amdgpu_dm_connector 
  #endif
        bool force_yuv420_output;
        struct dsc_preferred_settings dsc_settings;
+       union dp_downstream_port_present mst_downstream_port_present;
        /* Cached display modes */
        struct drm_display_mode freesync_vid_base;
  
index 35c944a8e74d5d206a9ec5f2c2a620bce95f2423,8e97d21bdf5c8407f187ef0287803ead3d5b67d8..d24be9fb5845427f6ceaf30dd6ec7fe5d93b176a
@@@ -25,8 -25,8 +25,8 @@@
  
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_helper.h>
 -#include <drm/drm_dp_mst_helper.h>
 -#include <drm/drm_dp_helper.h>
 +#include <drm/dp/drm_dp_mst_helper.h>
 +#include <drm/dp/drm_dp_helper.h>
  #include "dm_services.h"
  #include "amdgpu.h"
  #include "amdgpu_dm.h"
@@@ -159,7 -159,7 +159,7 @@@ static const struct drm_connector_func
  };
  
  #if defined(CONFIG_DRM_AMD_DC_DCN)
- static bool needs_dsc_aux_workaround(struct dc_link *link)
+ bool needs_dsc_aux_workaround(struct dc_link *link)
  {
        if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
            (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
@@@ -209,6 -209,25 +209,25 @@@ static bool validate_dsc_caps_on_connec
  
        return true;
  }
+
+ bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
+ {
+       union dp_downstream_port_present ds_port_present;
+
+       if (!aconnector->dsc_aux)
+               return false;
+
+       if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
+               DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
+               return false;
+       }
+
+       aconnector->mst_downstream_port_present = ds_port_present;
+       DRM_INFO("Downstream port present %d, type %d\n",
+                       ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);
+
+       return true;
+ }
  #endif
  
  static int dm_dp_mst_get_modes(struct drm_connector *connector)
                        if (!validate_dsc_caps_on_connector(aconnector))
                                memset(&aconnector->dc_sink->dsc_caps,
                                       0, sizeof(aconnector->dc_sink->dsc_caps));
+                       if (!retrieve_downstream_port_device(aconnector))
+                               memset(&aconnector->mst_downstream_port_present,
+                                       0, sizeof(aconnector->mst_downstream_port_present));
  #endif
                }
        }
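
retrieve_downstream_port_device() caches DPCD register 0x005 (DOWN_STREAM_PORT_PRESENT) from the branch device. Going by the DisplayPort spec, bit 0 reports whether a downstream facing port is present and bits 2:1 give its type; a tiny standalone parser of that byte could look like the following (the macros are ours, not DC's):

    #include <stdint.h>
    #include <stdio.h>

    /* DPCD 0x005 (DOWN_STREAM_PORT_PRESENT) bit layout per the DP spec. */
    #define DFP_PRESENT(v)        ((v) & 0x1)
    #define DFP_TYPE(v)           (((v) >> 1) & 0x3) /* 0 DP, 1 VGA, 2 DVI/HDMI, 3 other */
    #define DETAILED_CAP_AVAIL(v) (((v) >> 4) & 0x1)

    static void dump_downstream_port(uint8_t dpcd_0x05)
    {
        printf("present=%d type=%d detailed_caps=%d\n",
               DFP_PRESENT(dpcd_0x05), DFP_TYPE(dpcd_0x05),
               DETAILED_CAP_AVAIL(dpcd_0x05));
    }

    int main(void)
    {
        dump_downstream_port(0x05);   /* present, type 2 (DVI/HDMI) */
        return 0;
    }
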
index 6f5528d340939c0984a52a48778e2cfb0da50785,83fc03a5c9cd6e204da972c4a1be321d9e58a159..467f606ba2c729d79adfc06d042cee18cd795c75
@@@ -1220,6 -1220,8 +1220,8 @@@ struct dc *dc_create(const struct dc_in
  
                dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
  
+               dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
                if (dc->res_pool->dmcu != NULL)
                        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
        }
@@@ -1418,22 -1420,15 +1420,22 @@@ static void program_timing_sync
                                        pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
                        }
                } else {
 -                      /* remove any other pipes by checking valid plane */
                        for (j = j + 1; j < group_size; j++) {
 -                              if (pipe_set[j]->plane_state) {
 +                              bool is_blanked;
 +
 +                              if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
 +                                      is_blanked =
 +                                              pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
 +                              else
 +                                      is_blanked =
 +                                              pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
 +                              if (!is_blanked) {
                                        group_size--;
                                        pipe_set[j] = pipe_set[group_size];
                                        j--;
                                }
 -                      }
 -              }
 +                      }
 +              }
  
                if (group_size > 1) {
                        if (sync_type == TIMING_SYNCHRONIZABLE) {
@@@ -1467,7 -1462,7 +1469,7 @@@ static bool context_changed
        return false;
  }
  
- bool dc_validate_seamless_boot_timing(const struct dc *dc,
+ bool dc_validate_boot_timing(const struct dc *dc,
                                const struct dc_sink *sink,
                                struct dc_crtc_timing *crtc_timing)
  {
@@@ -2377,10 -2372,8 +2379,8 @@@ static enum surface_update_type check_u
                if (stream_update->dsc_config)
                        su_flags->bits.dsc_changed = 1;
  
- #if defined(CONFIG_DRM_AMD_DC_DCN)
                if (stream_update->mst_bw_update)
                        su_flags->bits.mst_bw = 1;
- #endif
  
                if (su_flags->raw != 0)
                        overall_type = UPDATE_TYPE_FULL;
@@@ -2722,6 -2715,9 +2722,9 @@@ static void commit_planes_do_stream_upd
                                        stream_update->vsp_infopacket) {
                                resource_build_info_frame(pipe_ctx);
                                dc->hwss.update_info_frame(pipe_ctx);
+                               if (dc_is_dp_signal(pipe_ctx->stream->signal))
+                                       dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
                        }
  
                        if (stream_update->hdr_static_metadata &&
                        if (stream_update->dsc_config)
                                dp_update_dsc_config(pipe_ctx);
  
- #if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (stream_update->mst_bw_update) {
                                if (stream_update->mst_bw_update->is_increase)
                                        dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
                                else
                                        dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
                        }
- #endif
  
                        if (stream_update->pending_test_pattern) {
                                dc_link_dp_set_test_pattern(stream->link,
index dbfe6690ded8a7353b26960067780225724b2c81,63ed28cbf8736beeee1ce49cd99159756386b0d7..2369f38ed06f1b95079205f64d1d4524c1846d7b
@@@ -54,10 -54,8 +54,8 @@@ struct resource_caps 
        int num_dsc;
        unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output).
        unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters).
- #if defined(CONFIG_DRM_AMD_DC_DCN)
        int num_hpo_dp_stream_encoder;
        int num_hpo_dp_link_encoder;
- #endif
        int num_mpc_3dlut;
  };
  
@@@ -77,14 -75,12 +75,12 @@@ struct resource_create_funcs 
        struct stream_encoder *(*create_stream_encoder)(
                        enum engine_id eng_id, struct dc_context *ctx);
  
- #if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)(
                        enum engine_id eng_id, struct dc_context *ctx);
  
        struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)(
                        uint8_t inst,
                        struct dc_context *ctx);
- #endif
  
        struct dce_hwseq *(*create_hwseq)(
                        struct dc_context *ctx);
@@@ -205,12 -201,9 +201,9 @@@ int get_num_mpc_splits(struct pipe_ctx 
  
  int get_num_odm_splits(struct pipe_ctx *pipe);
  
- #if defined(CONFIG_DRM_AMD_DC_DCN)
- struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
-               const struct resource_context *res_ctx,
-               const struct resource_pool *pool,
-               const struct dc_link *link);
- #endif
+ bool get_temp_dp_link_res(struct dc_link *link,
+               struct link_resource *link_res,
+               struct dc_link_settings *link_settings);
  
  void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
        struct dc_state *context);
  void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
        struct dc_state *context,
        uint8_t disabled_master_pipe_idx);
 +
  uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
  
+ const struct link_hwss *get_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res);
  #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
index 270260e82b616029371131448bdeec8bda3809e2,4edaa3318714bc0e105dc0d6e4abd7a7011dcc75..ac822181359c4f6904eec040ea7e2f1860ba3d94
@@@ -26,7 -26,7 +26,7 @@@
  #ifndef __DAL_DPCD_DEFS_H__
  #define __DAL_DPCD_DEFS_H__
  
 -#include <drm/drm_dp_helper.h>
 +#include <drm/dp/drm_dp_helper.h>
  #ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
  #define DP_SINK_HW_REVISION_START 0x409
  #endif
@@@ -144,14 -144,10 +144,10 @@@ enum dpcd_training_patterns 
        DPCD_TRAINING_PATTERN_1,
        DPCD_TRAINING_PATTERN_2,
        DPCD_TRAINING_PATTERN_3,
- #if defined(CONFIG_DRM_AMD_DC_DCN)
        DPCD_TRAINING_PATTERN_4 = 7,
        DPCD_128b_132b_TPS1 = 1,
        DPCD_128b_132b_TPS2 = 2,
        DPCD_128b_132b_TPS2_CDS = 3,
- #else
-       DPCD_TRAINING_PATTERN_4 = 7
- #endif
  };
  
  /* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus