Merge tag 'drm-for-v4.15-amd-dc' of git://people.freedesktop.org/~airlied/linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 17 Nov 2017 22:34:42 +0000 (14:34 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 17 Nov 2017 22:34:42 +0000 (14:34 -0800)
Pull amdgpu DC display code for Vega from Dave Airlie:
 "This is the pull request for the AMD DC (display code) layer which is
  a requirement to program the display engines on the new Vega and Raven
  based GPUs. It also contains support for all amdgpu supported GPUs
  (CIK, VI, Polaris), which has to be enabled. It is also a kms atomic
  modesetting compatible driver (unlike the current in-tree display
  code).

  I've kept it separate from drm-next because it may have some things
  that cause you to reject it.

  Background story:

  AMD have an internal team creating a shared OS codebase for display at
  hw bring up time using information from their hardware teams. This
  process doesn't lead to the most Linux friendly/looking code but we
  have worked together on cleaning a lot of it up and dealing with
  sparse/smatch/checkpatch, and having their team internally adhere to
  Linux coding standards.

  This tree is a complete history rebased since they started opening it,
  we decided not to squash it down as the history may have some value.
  Some of the commits therefore might not reach kernel standards, and we
  are steadily training people in AMD to better write commit msgs.

  There is a major bunch of generated bandwidth calculation and
  verification code that comes from their hardware team. On Vega and
  before this is float calculations, on Raven (DCN10) this is double
  based. They do the required things to do FP in the kernel, and I could
  understand this might raise some issues. Rewriting the bandwidth would
  be a major undertaking in reverification, it's non-trivial to work out
  if a display can handle the complete set of mode information thrown at
  it.

  Future story:

  There is a TODO list with this, and it addresses most of the remaining
  things that would be nice to refine/remove. The DCN10 code is still
  under development internally and they push out a lot of patches quite
  regularly and are supporting this code base with their display team. I
  think we've reached the point where keeping it out of tree is going to
  motivate distributions to start carrying the code, so I'd prefer we
  get it in tree. I think this code is slightly better than STAGING
  quality but not massively so, I'd really like to see that float/double
  magic gone and fixed point used, but AMD don't seem to think the
  accuracy and revalidation of the code is worth the effort"

* tag 'drm-for-v4.15-amd-dc' of git://people.freedesktop.org/~airlied/linux: (1110 commits)
  drm/amd/display: fix MST link training fail division by 0
  drm/amd/display: Fix formatting for null pointer dereference fix
  drm/amd/display: Remove dangling planes on dc commit state
  drm/amd/display: add flip_immediate to commit update for stream
  drm/amd/display: Miss register MST encoder cbs
  drm/amd/display: Fix warnings on S3 resume
  drm/amd/display: use num_timing_generator instead of pipe_count
  drm/amd/display: use configurable FBC option in dm
  drm/amd/display: fix AZ clock not enabled before program AZ endpoint
  amdgpu/dm: Don't use DRM_ERROR in amdgpu_dm_atomic_check
  amd/display: Fix potential null dereference in dce_calcs.c
  amdgpu/dm: Remove unused forward declaration
  drm/amdgpu: Remove unused dc_stream from amdgpu_crtc
  amdgpu/dc: Fix double unlock in amdgpu_dm_commit_planes
  amdgpu/dc: Fix missing null checks in amdgpu_dm.c
  amdgpu/dc: Fix potential null dereferences in amdgpu_dm.c
  amdgpu/dc: fix more indentation warnings
  amdgpu/dc: handle allocation failures in dc_commit_planes_to_stream.
  amdgpu/dc: fix indentation warning from smatch.
  amdgpu/dc: fix non-ansi function decls.
  ...

1  2 
Documentation/gpu/todo.rst
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/soc15.c

index 96f8ec7dbe4edb054664a9b8048530928182017b,7a69e22590b8afb12634567d7ddba4005aef0785..36625aa66c27672c00219e1283d4ecb034913e40
@@@ -304,18 -304,6 +304,18 @@@ There's a bunch of issues with it
  
  Contact: Daniel Vetter
  
 +KMS cleanups
 +------------
 +
 +Some of these date from the very introduction of KMS in 2008 ...
 +
 +- drm_mode_config.crtc_idr is misnamed, since it contains all KMS object. Should
 +  be renamed to drm_mode_config.object_idr.
 +
 +- drm_display_mode doesn't need to be derived from drm_mode_object. That's
 +  leftovers from older (never merged into upstream) KMS designs where modes
 +  where set using their ID, including support to add/remove modes.
 +
  Better Testing
  ==============
  
@@@ -409,5 -397,15 +409,15 @@@ those drivers as simple as possible, s
  
  Contact: Noralf Trønnes, Daniel Vetter
  
+ AMD DC Display Driver
+ ---------------------
+ AMD DC is the display driver for AMD devices starting with Vega. There has been
+ a bunch of progress cleaning it up but there's still plenty of work to be done.
+ See drivers/gpu/drm/amd/display/TODO for tasks.
+ Contact: Harry Wentland, Alex Deucher
  Outside DRM
  ===========
index 7fc42e0777705fd6597fb7631aca09f60d9dcf35,454e6efeb5cfdfd50c088ceb7e4a57850e9bc563..78d609123420455a1d8811eaeb6e91ebaa26e626
@@@ -1,16 -1,21 +1,22 @@@
 +# SPDX-License-Identifier: GPL-2.0
  #
  # Makefile for the drm device driver.  This driver provides support for the
  # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
  
  FULL_AMD_PATH=$(src)/..
+ DISPLAY_FOLDER_NAME=display
+ FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
  
  ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
        -I$(FULL_AMD_PATH)/include \
        -I$(FULL_AMD_PATH)/amdgpu \
        -I$(FULL_AMD_PATH)/scheduler \
        -I$(FULL_AMD_PATH)/powerplay/inc \
-       -I$(FULL_AMD_PATH)/acp/include
+       -I$(FULL_AMD_PATH)/acp/include \
+       -I$(FULL_AMD_DISPLAY_PATH) \
+       -I$(FULL_AMD_DISPLAY_PATH)/include \
+       -I$(FULL_AMD_DISPLAY_PATH)/dc \
+       -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
  
  amdgpu-y := amdgpu_drv.o
  
@@@ -26,7 -31,7 +32,7 @@@ amdgpu-y += amdgpu_device.o amdgpu_kms.
        amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 -      amdgpu_queue_mgr.o amdgpu_vf_error.o
 +      amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
  
  # add asic specific block
  amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@@ -133,4 -138,13 +139,13 @@@ include $(FULL_AMD_PATH)/powerplay/Make
  
  amdgpu-y += $(AMD_POWERPLAY_FILES)
  
+ ifneq ($(CONFIG_DRM_AMD_DC),)
+ RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+ include $(FULL_AMD_DISPLAY_PATH)/Makefile
+ amdgpu-y += $(AMD_DISPLAY_FILES)
+ endif
  obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
index cbcb6a153abae61810f1223e4080a2f18af60706,d161da95fffd55681ea4971478ac194a871a1883..5afaf6016b4a654f552c5a7439bfac8e3425ddc6
@@@ -66,6 -66,7 +66,7 @@@
  #include "amdgpu_vce.h"
  #include "amdgpu_vcn.h"
  #include "amdgpu_mn.h"
+ #include "amdgpu_dm.h"
  
  #include "gpu_scheduler.h"
  #include "amdgpu_virt.h"
@@@ -101,6 -102,8 +102,8 @@@ extern int amdgpu_vm_fragment_size
  extern int amdgpu_vm_fault_stop;
  extern int amdgpu_vm_debug;
  extern int amdgpu_vm_update_mode;
+ extern int amdgpu_dc;
+ extern int amdgpu_dc_log;
  extern int amdgpu_sched_jobs;
  extern int amdgpu_sched_hw_submission;
  extern int amdgpu_no_evict;
@@@ -732,14 -735,10 +735,14 @@@ struct amdgpu_ctx 
        struct amdgpu_device    *adev;
        struct amdgpu_queue_mgr queue_mgr;
        unsigned                reset_counter;
 +      uint32_t                vram_lost_counter;
        spinlock_t              ring_lock;
        struct dma_fence        **fences;
        struct amdgpu_ctx_ring  rings[AMDGPU_MAX_RINGS];
 -      bool preamble_presented;
 +      bool                    preamble_presented;
 +      enum amd_sched_priority init_priority;
 +      enum amd_sched_priority override_priority;
 +      struct mutex            lock;
  };
  
  struct amdgpu_ctx_mgr {
@@@ -756,18 -755,13 +759,18 @@@ int amdgpu_ctx_add_fence(struct amdgpu_
                              struct dma_fence *fence, uint64_t *seq);
  struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq);
 +void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 +                                enum amd_sched_priority priority);
  
  int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp);
  
 +int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
 +
  void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
  void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
  
 +
  /*
   * file private structure
   */
@@@ -779,6 -773,7 +782,6 @@@ struct amdgpu_fpriv 
        struct mutex            bo_list_lock;
        struct idr              bo_list_handles;
        struct amdgpu_ctx_mgr   ctx_mgr;
 -      u32                     vram_lost_counter;
  };
  
  /*
@@@ -879,7 -874,7 +882,7 @@@ struct amdgpu_mec 
  struct amdgpu_kiq {
        u64                     eop_gpu_addr;
        struct amdgpu_bo        *eop_obj;
 -      struct mutex            ring_mutex;
 +      spinlock_t              ring_lock;
        struct amdgpu_ring      ring;
        struct amdgpu_irq_src   irq;
  };
@@@ -1043,10 -1038,6 +1046,10 @@@ struct amdgpu_gfx 
        bool                            in_suspend;
        /* NGG */
        struct amdgpu_ngg               ngg;
 +
 +      /* pipe reservation */
 +      struct mutex                    pipe_reserve_mutex;
 +      DECLARE_BITMAP                  (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
  };
  
  int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@@ -1125,7 -1116,6 +1128,7 @@@ struct amdgpu_job 
        uint32_t                gds_base, gds_size;
        uint32_t                gws_base, gws_size;
        uint32_t                oa_base, oa_size;
 +      uint32_t                vram_lost_counter;
  
        /* user fence handling */
        uint64_t                uf_addr;
@@@ -1151,7 -1141,7 +1154,7 @@@ static inline void amdgpu_set_ib_value(
  /*
   * Writeback
   */
 -#define AMDGPU_MAX_WB 1024    /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
 +#define AMDGPU_MAX_WB 512     /* Reserve at most 512 WB slots for amdgpu-owned rings. */
  
  struct amdgpu_wb {
        struct amdgpu_bo        *wb_obj;
@@@ -1391,18 -1381,6 +1394,18 @@@ struct amdgpu_atcs 
        struct amdgpu_atcs_functions functions;
  };
  
 +/*
 + * Firmware VRAM reservation
 + */
 +struct amdgpu_fw_vram_usage {
 +      u64 start_offset;
 +      u64 size;
 +      struct amdgpu_bo *reserved_bo;
 +      void *va;
 +};
 +
 +int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
 +
  /*
   * CGS
   */
@@@ -1535,6 -1513,7 +1538,7 @@@ struct amdgpu_device 
        /* display */
        bool                            enable_virtual_display;
        struct amdgpu_mode_info         mode_info;
+       /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
        struct work_struct              hotplug_work;
        struct amdgpu_irq_src           crtc_irq;
        struct amdgpu_irq_src           pageflip_irq;
        /* GDS */
        struct amdgpu_gds               gds;
  
+       /* display related functionality */
+       struct amdgpu_display_manager dm;
        struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
        int                             num_ip_blocks;
        struct mutex    mn_lock;
        struct delayed_work     late_init_work;
  
        struct amdgpu_virt      virt;
 +      /* firmware VRAM reservation */
 +      struct amdgpu_fw_vram_usage fw_vram_usage;
  
        /* link all shadow bo */
        struct list_head                shadow_list;
@@@ -1653,6 -1633,9 +1660,9 @@@ void amdgpu_mm_wdoorbell(struct amdgpu_
  u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
  void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
  
+ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
  /*
   * Registers read & write functions.
   */
@@@ -1860,6 -1843,8 +1870,6 @@@ static inline bool amdgpu_has_atpx(void
  extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
  extern const int amdgpu_max_kms_ioctl;
  
 -bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
 -                        struct amdgpu_fpriv *fpriv);
  int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
  void amdgpu_driver_unload_kms(struct drm_device *dev);
  void amdgpu_driver_lastclose_kms(struct drm_device *dev);
@@@ -1911,5 -1896,11 +1921,11 @@@ int amdgpu_cs_find_mapping(struct amdgp
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **mapping);
  
+ #if defined(CONFIG_DRM_AMD_DC)
+ int amdgpu_dm_display_resume(struct amdgpu_device *adev );
+ #else
+ static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+ #endif
  #include "amdgpu_object.h"
  #endif
index efcacb827de73cab22755eb6c13ec05b76cd6bd8,f5543758667f47c3bca7987cba1e5014bb9945b7..2d792cdc094cd60e86542c0ed5380c3590198fc2
@@@ -31,6 -31,7 +31,7 @@@
  #include <linux/debugfs.h>
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
+ #include <drm/drm_atomic_helper.h>
  #include <drm/amdgpu_drm.h>
  #include <linux/vgaarb.h>
  #include <linux/vga_switcheroo.h>
@@@ -109,8 -110,10 +110,8 @@@ uint32_t amdgpu_mm_rreg(struct amdgpu_d
  {
        uint32_t ret;
  
 -      if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
 -              BUG_ON(in_interrupt());
 +      if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_rreg(adev, reg);
 -      }
  
        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@@ -135,8 -138,10 +136,8 @@@ void amdgpu_mm_wreg(struct amdgpu_devic
                adev->last_mm_index = v;
        }
  
 -      if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
 -              BUG_ON(in_interrupt());
 +      if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
                return amdgpu_virt_kiq_wreg(adev, reg, v);
 -      }
  
        if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@@ -546,7 -551,7 +547,7 @@@ int amdgpu_wb_get(struct amdgpu_device 
  
        if (offset < adev->wb.num_wb) {
                __set_bit(offset, adev->wb.used);
 -              *wb = offset * 8; /* convert to dw offset */
 +              *wb = offset << 3; /* convert to dw offset */
                return 0;
        } else {
                return -EINVAL;
  void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
  {
        if (wb < adev->wb.num_wb)
 -              __clear_bit(wb, adev->wb.used);
 +              __clear_bit(wb >> 3, adev->wb.used);
  }
  
  /**
@@@ -654,96 -659,42 +655,96 @@@ void amdgpu_gart_location(struct amdgpu
  }
  
  /*
 - * GPU helpers function.
 + * Firmware Reservation functions
   */
  /**
 - * amdgpu_need_post - check if the hw need post or not
 + * amdgpu_fw_reserve_vram_fini - free fw reserved vram
   *
   * @adev: amdgpu_device pointer
   *
 - * Check if the asic has been initialized (all asics) at driver startup
 - * or post is needed if  hw reset is performed.
 - * Returns true if need or false if not.
 + * free fw reserved vram if it has been reserved.
   */
 -bool amdgpu_need_post(struct amdgpu_device *adev)
 +void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
  {
 -      uint32_t reg;
 +      amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
 +              NULL, &adev->fw_vram_usage.va);
 +}
  
 -      if (adev->has_hw_reset) {
 -              adev->has_hw_reset = false;
 -              return true;
 -      }
 +/**
 + * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * create bo vram reservation from fw.
 + */
 +int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
 +{
 +      int r = 0;
 +      u64 gpu_addr;
 +      u64 vram_size = adev->mc.visible_vram_size;
  
 -      /* bios scratch used on CIK+ */
 -      if (adev->asic_type >= CHIP_BONAIRE)
 -              return amdgpu_atombios_scratch_need_asic_init(adev);
 +      adev->fw_vram_usage.va = NULL;
 +      adev->fw_vram_usage.reserved_bo = NULL;
  
 -      /* check MEM_SIZE for older asics */
 -      reg = amdgpu_asic_get_config_memsize(adev);
 +      if (adev->fw_vram_usage.size > 0 &&
 +              adev->fw_vram_usage.size <= vram_size) {
  
 -      if ((reg != 0) && (reg != 0xffffffff))
 -              return false;
 +              r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
 +                      PAGE_SIZE, true, 0,
 +                      AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 +                      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
 +                      &adev->fw_vram_usage.reserved_bo);
 +              if (r)
 +                      goto error_create;
  
 -      return true;
 +              r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
 +              if (r)
 +                      goto error_reserve;
 +              r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
 +                      AMDGPU_GEM_DOMAIN_VRAM,
 +                      adev->fw_vram_usage.start_offset,
 +                      (adev->fw_vram_usage.start_offset +
 +                      adev->fw_vram_usage.size), &gpu_addr);
 +              if (r)
 +                      goto error_pin;
 +              r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
 +                      &adev->fw_vram_usage.va);
 +              if (r)
 +                      goto error_kmap;
  
 +              amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
 +      }
 +      return r;
 +
 +error_kmap:
 +      amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
 +error_pin:
 +      amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
 +error_reserve:
 +      amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
 +error_create:
 +      adev->fw_vram_usage.va = NULL;
 +      adev->fw_vram_usage.reserved_bo = NULL;
 +      return r;
  }
  
 -static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
 +
 +/*
 + * GPU helpers function.
 + */
 +/**
 + * amdgpu_need_post - check if the hw need post or not
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Check if the asic has been initialized (all asics) at driver startup
 + * or post is needed if  hw reset is performed.
 + * Returns true if need or false if not.
 + */
 +bool amdgpu_need_post(struct amdgpu_device *adev)
  {
 +      uint32_t reg;
 +
        if (amdgpu_sriov_vf(adev))
                return false;
  
                                return true;
                }
        }
 -      return amdgpu_need_post(adev);
 +
 +      if (adev->has_hw_reset) {
 +              adev->has_hw_reset = false;
 +              return true;
 +      }
 +
 +      /* bios scratch used on CIK+ */
 +      if (adev->asic_type >= CHIP_BONAIRE)
 +              return amdgpu_atombios_scratch_need_asic_init(adev);
 +
 +      /* check MEM_SIZE for older asics */
 +      reg = amdgpu_asic_get_config_memsize(adev);
 +
 +      if ((reg != 0) && (reg != 0xffffffff))
 +              return false;
 +
 +      return true;
  }
  
  /**
@@@ -1670,6 -1605,7 +1671,6 @@@ static int amdgpu_init(struct amdgpu_de
                        return r;
                }
                adev->ip_blocks[i].status.sw = true;
 -
                /* need to do gmc hw init early so we can allocate gpu mem */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
                        r = amdgpu_vram_scratch_init(adev);
                }
        }
  
 -      mutex_lock(&adev->firmware.mutex);
 -      if (amdgpu_ucode_init_bo(adev))
 -              adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
 -      mutex_unlock(&adev->firmware.mutex);
 -
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
@@@ -1835,6 -1776,8 +1836,6 @@@ static int amdgpu_fini(struct amdgpu_de
  
                adev->ip_blocks[i].status.hw = false;
        }
 -      if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
 -              amdgpu_ucode_fini_bo(adev);
  
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
@@@ -1946,7 -1889,6 +1947,7 @@@ static int amdgpu_sriov_reinit_late(str
  
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_SMC,
 +              AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_DCE,
                AMD_IP_BLOCK_TYPE_GFX,
                AMD_IP_BLOCK_TYPE_SDMA,
@@@ -2032,20 -1974,61 +2033,66 @@@ static int amdgpu_resume(struct amdgpu_
  
  static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
  {
 -      if (adev->is_atom_fw) {
 -              if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
 -                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 -      } else {
 -              if (amdgpu_atombios_has_gpu_virtualization_table(adev))
 -                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 +      if (amdgpu_sriov_vf(adev)) {
 +              if (adev->is_atom_fw) {
 +                      if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
 +                              adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 +              } else {
 +                      if (amdgpu_atombios_has_gpu_virtualization_table(adev))
 +                              adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
 +              }
 +
 +              if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
 +                      amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
        }
  }
  
+ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+ {
+       switch (asic_type) {
+ #if defined(CONFIG_DRM_AMD_DC)
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+       case CHIP_KAVERI:
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+       case CHIP_POLARIS11:
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+ #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+               return amdgpu_dc != 0;
+ #endif
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+               return amdgpu_dc > 0;
+       case CHIP_VEGA10:
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+       case CHIP_RAVEN:
+ #endif
+               return amdgpu_dc != 0;
+ #endif
+       default:
+               return false;
+       }
+ }
+ /**
+  * amdgpu_device_has_dc_support - check if dc is supported
+  *
+  * @adev: amdgpu_device_pointer
+  *
+  * Returns true for supported, false for not supported
+  */
+ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+ {
+       if (amdgpu_sriov_vf(adev))
+               return false;
+       return amdgpu_device_asic_has_dc_support(adev->asic_type);
+ }
  /**
   * amdgpu_device_init - initialize the driver
   *
@@@ -2083,7 -2066,6 +2130,7 @@@ int amdgpu_device_init(struct amdgpu_de
        adev->vm_manager.vm_pte_num_rings = 0;
        adev->gart.gart_funcs = NULL;
        adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 +      bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
  
        adev->smc_rreg = &amdgpu_invalid_rreg;
        adev->smc_wreg = &amdgpu_invalid_wreg;
        adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
        adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
  
        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
        mutex_init(&adev->pm.mutex);
        mutex_init(&adev->gfx.gpu_clock_mutex);
        mutex_init(&adev->srbm_mutex);
 +      mutex_init(&adev->gfx.pipe_reserve_mutex);
        mutex_init(&adev->grbm_idx_mutex);
        mutex_init(&adev->mn_lock);
        mutex_init(&adev->virt.vf_errors.lock);
        amdgpu_device_detect_sriov_bios(adev);
  
        /* Post card if necessary */
 -      if (amdgpu_vpost_needed(adev)) {
 +      if (amdgpu_need_post(adev)) {
                if (!adev->bios) {
                        dev_err(adev->dev, "no vBIOS found\n");
 -                      amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
                        r = -EINVAL;
                        goto failed;
                }
                r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
                if (r) {
                        dev_err(adev->dev, "gpu post error!\n");
 -                      amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
                        goto failed;
                }
        } else {
                        goto failed;
                }
                /* init i2c buses */
-               amdgpu_atombios_i2c_init(adev);
+               if (!amdgpu_device_has_dc_support(adev))
+                       amdgpu_atombios_i2c_init(adev);
        }
  
        /* Fence driver */
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
  
 +      if (amdgpu_sriov_vf(adev))
 +              amdgpu_virt_init_data_exchange(adev);
 +
        amdgpu_fbdev_init(adev);
  
        r = amdgpu_pm_sysfs_init(adev);
@@@ -2367,7 -2347,6 +2414,7 @@@ void amdgpu_device_fini(struct amdgpu_d
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
 +      amdgpu_fw_reserve_vram_fini(adev);
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
        adev->accel_working = false;
        cancel_delayed_work_sync(&adev->late_init_work);
        /* free i2c buses */
-       amdgpu_i2c_fini(adev);
+       if (!amdgpu_device_has_dc_support(adev))
+               amdgpu_i2c_fini(adev);
        amdgpu_atombios_fini(adev);
        kfree(adev->bios);
        adev->bios = NULL;
@@@ -2429,12 -2409,14 +2477,14 @@@ int amdgpu_device_suspend(struct drm_de
  
        drm_kms_helper_poll_disable(dev);
  
-       /* turn off display hw */
-       drm_modeset_lock_all(dev);
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+       if (!amdgpu_device_has_dc_support(adev)) {
+               /* turn off display hw */
+               drm_modeset_lock_all(dev);
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+               }
+               drm_modeset_unlock_all(dev);
        }
-       drm_modeset_unlock_all(dev);
  
        amdgpu_amdkfd_suspend(adev);
  
@@@ -2577,13 -2559,25 +2627,25 @@@ int amdgpu_device_resume(struct drm_dev
  
        /* blat the mode back in */
        if (fbcon) {
-               drm_helper_resume_force_mode(dev);
-               /* turn on display hw */
-               drm_modeset_lock_all(dev);
-               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-                       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+               if (!amdgpu_device_has_dc_support(adev)) {
+                       /* pre DCE11 */
+                       drm_helper_resume_force_mode(dev);
+                       /* turn on display hw */
+                       drm_modeset_lock_all(dev);
+                       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                               drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+                       }
+                       drm_modeset_unlock_all(dev);
+               } else {
+                       /*
+                        * There is no equivalent atomic helper to turn on
+                        * display, so we defined our own function for this,
+                        * once suspend resume is supported by the atomic
+                        * framework this will be reworked
+                        */
+                       amdgpu_dm_display_resume(adev);
                }
-               drm_modeset_unlock_all(dev);
        }
  
        drm_kms_helper_poll_enable(dev);
  #ifdef CONFIG_PM
        dev->dev->power.disable_depth++;
  #endif
-       drm_helper_hpd_irq_event(dev);
+       if (!amdgpu_device_has_dc_support(adev))
+               drm_helper_hpd_irq_event(dev);
+       else
+               drm_kms_helper_hotplug_event(dev);
  #ifdef CONFIG_PM
        dev->dev->power.disable_depth--;
  #endif
@@@ -2620,9 -2617,6 +2685,9 @@@ static bool amdgpu_check_soft_reset(str
        int i;
        bool asic_hang = false;
  
 +      if (amdgpu_sriov_vf(adev))
 +              return true;
 +
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@@ -2900,6 -2894,7 +2965,7 @@@ give_up_reset
   */
  int amdgpu_gpu_reset(struct amdgpu_device *adev)
  {
+       struct drm_atomic_state *state = NULL;
        int i, r;
        int resched;
        bool need_full_reset, vram_lost = false;
  
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+       /* store modesetting */
+       if (amdgpu_device_has_dc_support(adev))
+               state = drm_atomic_helper_suspend(adev->ddev);
  
        /* block scheduler */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@@ -3022,6 -3020,7 +3091,6 @@@ out
                }
        } else {
                dev_err(adev->dev, "asic resume failed (%d).\n", r);
 -              amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        if (adev->rings[i] && adev->rings[i]->sched.thread) {
                                kthread_unpark(adev->rings[i]->sched.thread);
                }
        }
  
-       drm_helper_resume_force_mode(adev->ddev);
+       if (amdgpu_device_has_dc_support(adev)) {
+               r = drm_atomic_helper_resume(adev->ddev, state);
+               amdgpu_dm_display_resume(adev);
+       } else
+               drm_helper_resume_force_mode(adev->ddev);
  
        ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
        if (r) {
                /* bad news, how to tell it to userspace ? */
                dev_info(adev->dev, "GPU reset failed\n");
 -              amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
        }
        else {
                dev_info(adev->dev, "GPU reset successed!\n");
index dd2f060d62a86306500ed9cb3dc55915d4e7afda,715ede92ca8af24b702e715d2489dcd472c85742..ec96bb1f9eafbc374cdad09c85f96da8e8d1bbad
   * - 3.19.0 - Add support for UVD MJPEG decode
   * - 3.20.0 - Add support for local BOs
   * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
 + * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
 + * - 3.23.0 - Add query for VRAM lost counter
   */
  #define KMS_DRIVER_MAJOR      3
 -#define KMS_DRIVER_MINOR      21
 +#define KMS_DRIVER_MINOR      23
  #define KMS_DRIVER_PATCHLEVEL 0
  
  int amdgpu_vram_limit = 0;
@@@ -106,6 -104,8 +106,8 @@@ int amdgpu_vm_debug = 0
  int amdgpu_vram_page_split = 512;
  int amdgpu_vm_update_mode = -1;
  int amdgpu_exp_hw_support = 0;
+ int amdgpu_dc = -1;
+ int amdgpu_dc_log = 0;
  int amdgpu_sched_jobs = 32;
  int amdgpu_sched_hw_submission = 2;
  int amdgpu_no_evict = 0;
@@@ -211,6 -211,12 +213,12 @@@ module_param_named(vram_page_split, amd
  MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
  module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
  
+ MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+ module_param_named(dc, amdgpu_dc, int, 0444);
+ MODULE_PARM_DESC(dc_log, "Display Core Log Level (0 = minimal (default), 1 = chatty)");
+ module_param_named(dc_log, amdgpu_dc_log, int, 0444);
  MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
  module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
  
@@@ -518,15 -524,15 +526,15 @@@ static const struct pci_device_id pciid
        {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        /* Vega 10 */
-       {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+       {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
        /* Raven */
        {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
  
index 6f0b26dae3b0009ee127929186ef43d140b87bcb,b83ef4f204b3f8cee960409be3d976a50bede2ad..720139e182a3d8be1cb2015630e3519f96785d8a
@@@ -28,7 -28,6 +28,7 @@@
  #include <drm/drmP.h>
  #include "amdgpu.h"
  #include <drm/amdgpu_drm.h>
 +#include "amdgpu_sched.h"
  #include "amdgpu_uvd.h"
  #include "amdgpu_vce.h"
  
@@@ -270,6 -269,7 +270,6 @@@ static int amdgpu_firmware_info(struct 
  static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
  {
        struct amdgpu_device *adev = dev->dev_private;
 -      struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct drm_amdgpu_info *info = data;
        struct amdgpu_mode_info *minfo = &adev->mode_info;
        void __user *out = (void __user *)(uintptr_t)info->return_pointer;
  
        if (!info->return_size || !info->return_pointer)
                return -EINVAL;
 -      if (amdgpu_kms_vram_lost(adev, fpriv))
 -              return -ENODEV;
  
        switch (info->query) {
        case AMDGPU_INFO_ACCEL_WORKING:
                }
                return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        }
 +      case AMDGPU_INFO_VRAM_LOST_COUNTER:
 +              ui32 = atomic_read(&adev->vram_lost_counter);
 +              return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->query);
                return -EINVAL;
@@@ -792,6 -791,12 +792,6 @@@ void amdgpu_driver_lastclose_kms(struc
        vga_switcheroo_process_delayed_switch();
  }
  
 -bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
 -                        struct amdgpu_fpriv *fpriv)
 -{
 -      return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
 -}
 -
  /**
   * amdgpu_driver_open_kms - drm callback for open
   *
@@@ -848,6 -853,7 +848,6 @@@ int amdgpu_driver_open_kms(struct drm_d
  
        amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
  
 -      fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        file_priv->driver_priv = fpriv;
  
  out_suspend:
@@@ -1017,7 -1023,6 +1017,7 @@@ const struct drm_ioctl_desc amdgpu_ioct
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 +      DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
        DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        /* KMS */
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
  };
  const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
  
index 3ca9d114f630e67e3e7f23b1318e550b49c6f2e4,6705954a985d5caa3e8456dbc2a4c6ea6045eabb..4e67fe1e79553d610c79819b525ecb881c13d059
@@@ -279,7 -279,10 +279,7 @@@ static void soc15_init_golden_registers
  }
  static u32 soc15_get_xclk(struct amdgpu_device *adev)
  {
 -      if (adev->asic_type == CHIP_VEGA10)
 -              return adev->clock.spll.reference_freq/4;
 -      else
 -              return adev->clock.spll.reference_freq;
 +      return adev->clock.spll.reference_freq;
  }
  
  
@@@ -532,6 -535,12 +532,12 @@@ int soc15_set_ip_blocks(struct amdgpu_d
                        amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+               else if (amdgpu_device_has_dc_support(adev))
+                       amdgpu_ip_block_add(adev, &dm_ip_block);
+ #else
+ #     warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+ #endif
                amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
                amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ #if defined(CONFIG_DRM_AMD_DC)
+               else if (amdgpu_device_has_dc_support(adev))
+                       amdgpu_ip_block_add(adev, &dm_ip_block);
+ #else
+ #     warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+ #endif
                amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);