Merge tag 'amd-drm-next-5.10-2020-09-03' of git://people.freedesktop.org/~agd5f/linux...
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 88f63d7ea3712fc49d75a6a6b04aff9f4dd253d7..94b7e0531d09285e41e0eb6a21569b081bd8a7a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1343,21 +1343,22 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
        amdgpu_gfx_compute_queue_acquire(adev);
 
        mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
+       if (mec_hpd_size) {
+               r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+                                             AMDGPU_GEM_DOMAIN_VRAM,
+                                             &adev->gfx.mec.hpd_eop_obj,
+                                             &adev->gfx.mec.hpd_eop_gpu_addr,
+                                             (void **)&hpd);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+                       return r;
+               }
 
-       r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_VRAM,
-                                     &adev->gfx.mec.hpd_eop_obj,
-                                     &adev->gfx.mec.hpd_eop_gpu_addr,
-                                     (void **)&hpd);
-       if (r) {
-               dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-               return r;
-       }
-
-       memset(hpd, 0, mec_hpd_size);
+               memset(hpd, 0, mec_hpd_size);
 
-       amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
-       amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+               amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+               amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+       }
 
        return 0;
 }
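
[Editor's note] With the number of compute rings now configurable (see the amdgpu_num_kcq hunk further down), mec_hpd_size can legitimately be zero, so this hunk wraps the EOP buffer creation, clear and unmap in an if (mec_hpd_size) guard instead of always calling amdgpu_bo_create_reserved(). Below is a minimal, self-contained sketch of the same "only allocate when the computed size is non-zero" pattern; the names and sizes are illustrative stand-ins, not amdgpu API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the per-queue EOP region size. */
#define HPD_SIZE 4096

/*
 * Same pattern as the hunk above: compute the total size first and only
 * allocate/clear the backing buffer when it is non-zero, so a configuration
 * with zero compute queues never attempts a zero-size allocation.
 */
static int init_eop_buffer(unsigned int num_queues, void **buf_out)
{
        size_t size = (size_t)num_queues * HPD_SIZE;

        *buf_out = NULL;
        if (size) {
                void *buf = malloc(size);

                if (!buf) {
                        fprintf(stderr, "EOP buffer allocation failed\n");
                        return -1;
                }
                memset(buf, 0, size);
                *buf_out = buf;
        }
        return 0;
}

int main(void)
{
        void *buf;

        if (init_eop_buffer(0, &buf) == 0 && !buf)
                puts("0 queues: allocation skipped");

        if (init_eop_buffer(8, &buf) == 0 && buf) {
                puts("8 queues: buffer allocated and zeroed");
                free(buf);
        }
        return 0;
}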
@@ -4632,7 +4633,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 
        gfx_v8_0_kiq_setting(ring);
 
-       if (adev->in_gpu_reset) { /* for GPU_RESET case */
+       if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
                /* reset MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4669,7 +4670,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
        struct vi_mqd *mqd = ring->mqd_ptr;
        int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-       if (!adev->in_gpu_reset && !adev->in_suspend) {
+       if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
                memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
                ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
                ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4681,7 +4682,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-       } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
+       } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
                /* reset MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
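
[Editor's note] The kiq and kcq init hunks above stop reading adev->in_gpu_reset directly and go through the amdgpu_in_reset() helper instead, so the "is a GPU reset in flight?" question is answered by a single accessor rather than an open-coded field read. The sketch below shows the general shape of such an accessor around an atomic flag; the struct layout and names are assumptions for illustration, not the exact amdgpu definition.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Assumed shape for illustration; not the real struct amdgpu_device. */
struct device_state {
        atomic_int in_reset;
};

/* One accessor the rest of the driver calls instead of reading the field. */
static inline bool dev_in_reset(struct device_state *dev)
{
        return atomic_load(&dev->in_reset) != 0;
}

static void init_queue(struct device_state *dev)
{
        if (dev_in_reset(dev))          /* GPU_RESET case: restore a clean MQD */
                puts("restoring MQD from backup");
        else                            /* fresh init: build the MQD from scratch */
                puts("initializing MQD from scratch");
}

int main(void)
{
        struct device_state dev = { .in_reset = 0 };

        init_queue(&dev);               /* normal init path */
        atomic_store(&dev.in_reset, 1); /* pretend a reset is in flight */
        init_queue(&dev);               /* recovery path */
        return 0;
}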
@@ -5294,7 +5295,7 @@ static int gfx_v8_0_early_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
-       adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+       adev->gfx.num_compute_rings = amdgpu_num_kcq;
        adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
        gfx_v8_0_set_ring_funcs(adev);
        gfx_v8_0_set_irq_funcs(adev);
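
[Editor's note] The early_init hunk swaps the hard-coded AMDGPU_MAX_COMPUTE_RINGS for amdgpu_num_kcq, so the number of kernel compute queues comes from a module parameter rather than a compile-time constant. A hedged sketch of the usual module-parameter-plus-clamping pattern is below; the parameter name, bounds and messages are illustrative of the pattern, not copied from amdgpu_drv.c or amdgpu_device.c.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define MAX_KCQ 8

/* Illustrative parameter in the spirit of amdgpu.num_kcq; not the real one. */
static int num_kcq = MAX_KCQ;
module_param(num_kcq, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queues (0 = none, max 8)");

static int __init kcq_demo_init(void)
{
        /* Clamp out-of-range requests back to the hardware maximum. */
        if (num_kcq < 0 || num_kcq > MAX_KCQ) {
                pr_warn("num_kcq %d out of range, using %d\n", num_kcq, MAX_KCQ);
                num_kcq = MAX_KCQ;
        }
        pr_info("using %d kernel compute queues\n", num_kcq);
        return 0;
}

static void __exit kcq_demo_exit(void)
{
}

module_init(kcq_demo_init);
module_exit(kcq_demo_exit);
MODULE_LICENSE("GPL");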
@@ -5342,10 +5343,9 @@ static int gfx_v8_0_late_init(void *handle)
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
                                                       bool enable)
 {
-       if (((adev->asic_type == CHIP_POLARIS11) ||
+       if ((adev->asic_type == CHIP_POLARIS11) ||
            (adev->asic_type == CHIP_POLARIS12) ||
-           (adev->asic_type == CHIP_VEGAM)) &&
-           adev->powerplay.pp_funcs->set_powergating_by_smu)
+           (adev->asic_type == CHIP_VEGAM))
                /* Send msg to SMU via Powerplay */
                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
 
@@ -5879,8 +5879,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
                                PP_BLOCK_GFX_CG,
                                pp_support_state,
                                pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5901,8 +5900,7 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
                                PP_BLOCK_GFX_MG,
                                pp_support_state,
                                pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        return 0;
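
[Editor's note] In the static MG power-gating hunk above and in every clock-gating hunk in these update paths, the caller-side NULL test of adev->powerplay.pp_funcs->set_powergating_by_smu / ->set_clockgating_by_smu goes away, on the assumption (per this series) that the amdgpu_dpm_* wrapper, or the powerplay setup itself, handles a missing callback. A minimal sketch of that "move the function-pointer check into the wrapper" refactor follows; the table and function names are made up for illustration, not the real amd_pm_funcs interface.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the powerplay function table; names are made up. */
struct pp_funcs {
        int (*set_powergating)(bool enable);
};

struct device_ctx {
        const struct pp_funcs *pp_funcs;
};

/*
 * The wrapper owns the NULL check, so call sites (like the hunks above)
 * no longer have to test the function pointer before every call.
 */
static int dpm_set_powergating(struct device_ctx *dev, bool enable)
{
        if (!dev->pp_funcs || !dev->pp_funcs->set_powergating)
                return 0;       /* no SMU hook on this platform: nothing to do */
        return dev->pp_funcs->set_powergating(enable);
}

static int smu_set_powergating(bool enable)
{
        printf("powergating %s\n", enable ? "on" : "off");
        return 0;
}

static const struct pp_funcs funcs = { .set_powergating = smu_set_powergating };

int main(void)
{
        struct device_ctx with_smu = { .pp_funcs = &funcs };
        struct device_ctx without_smu = { .pp_funcs = NULL };

        dpm_set_powergating(&with_smu, true);    /* dispatches to the SMU hook */
        dpm_set_powergating(&without_smu, true); /* safely does nothing */
        return 0;
}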
@@ -5931,8 +5929,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                                PP_BLOCK_GFX_CG,
                                pp_support_state,
                                pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@@ -5951,8 +5948,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                                PP_BLOCK_GFX_3D,
                                pp_support_state,
                                pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@@ -5973,8 +5969,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                                PP_BLOCK_GFX_MG,
                                pp_support_state,
                                pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@@ -5989,8 +5984,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                                PP_BLOCK_GFX_RLC,
                                pp_support_state,
                                pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@@ -6004,8 +5998,7 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
                        PP_BLOCK_GFX_CP,
                        pp_support_state,
                        pp_state);
-               if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
-                       amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
+               amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
        }
 
        return 0;
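
[Editor's note] The Polaris clock-gating hunks above are the same call-site cleanup repeated for the CG, 3D, MG, RLC and CP blocks: each amdgpu_dpm_set_clockgating_by_smu() call loses its pp_funcs guard and relies on the wrapper to cope with an absent SMU callback, as in the sketch after the Tonga hunk.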