drm/connector: Allow max possible encoders to attach to a connector
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f4ac632a87b278d09a462bdfe4f145934f249884..5a7f893cf72448d9a4d6e9c12b0d3827d3e7bdcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
  */
 #include <linux/power_supply.h>
 #include <linux/kthread.h>
+#include <linux/module.h>
 #include <linux/console.h>
 #include <linux/slab.h>
-#include <drm/drmP.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
@@ -51,6 +52,7 @@
 #endif
 #include "vi.h"
 #include "soc15.h"
+#include "nv.h"
 #include "bif/bif_4_1_d.h"
 #include <linux/pci.h>
 #include <linux/firmware.h>
 
 #include "amdgpu_xgmi.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_pmu.h"
 
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
@@ -94,9 +98,32 @@ static const char *amdgpu_asic_name[] = {
        "VEGA12",
        "VEGA20",
        "RAVEN",
+       "NAVI10",
        "LAST",
 };
 
+/**
+ * DOC: pcie_replay_count
+ *
+ * The amdgpu driver provides a sysfs API for reporting the total number
+ * of PCIe replays (NAKs).
+ * The file pcie_replay_count is used for this and returns the total
+ * number of replays as the sum of the NAKs generated and the NAKs received.
+ */
+
+static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
+}
+
+static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
+               amdgpu_device_get_pcie_replay_count, NULL);
+
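DEVICE_ATTR(pcie_replay_count, S_IRUGO, ...) generates the dev_attr_pcie_replay_count object that amdgpu_device_init() registers with device_create_file() further down in this patch. Roughly, the macro expands to:

    static struct device_attribute dev_attr_pcie_replay_count = {
            .attr  = { .name = "pcie_replay_count", .mode = S_IRUGO },
            .show  = amdgpu_device_get_pcie_replay_count,
            .store = NULL,  /* read-only attribute */
    };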
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
 /**
@@ -484,7 +511,10 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;
-                       tmp |= or_mask;
+                       if (adev->family >= AMDGPU_FAMILY_AI)
+                               tmp |= (or_mask & and_mask);
+                       else
+                               tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
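On AMDGPU_FAMILY_AI and newer parts the OR value is now confined to the field selected by and_mask, so a register-sequence entry with stray bits can no longer corrupt neighboring fields. An illustrative example with made-up values:

    uint32_t tmp      = 0x11110000;
    uint32_t and_mask = 0x000000ff;         /* field being reprogrammed   */
    uint32_t or_mask  = 0x00001234;         /* value with stray high bits */

    tmp &= ~and_mask;                       /* 0x11110000                 */
    /* pre-AI:  tmp | or_mask              == 0x11111234, bits leak out   */
    /* AI+:     tmp | (or_mask & and_mask) == 0x11110034, write contained */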
@@ -910,8 +940,10 @@ def_value:
  * Validates certain module parameters and updates
  * the associated values used by the driver (all asics).
  */
-static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
+static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 {
+       int ret = 0;
+
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
@@ -949,19 +981,15 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
        amdgpu_device_check_block_size(adev);
 
-       if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
-           !is_power_of_2(amdgpu_vram_page_split))) {
-               dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
-                        amdgpu_vram_page_split);
-               amdgpu_vram_page_split = 1024;
-       }
-
-       if (amdgpu_lockup_timeout == 0) {
-               dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
-               amdgpu_lockup_timeout = 10000;
+       ret = amdgpu_device_get_job_timeout_settings(adev);
+       if (ret) {
+               dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+               return ret;
        }
 
        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+
+       return ret;
 }
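Since amdgpu_device_check_arguments() can now fail (an unparsable lockup_timeout string is a hard error instead of being silently clamped), the caller must propagate the result; the matching call-site change appears in amdgpu_device_init() further down:

    r = amdgpu_device_check_arguments(adev);
    if (r)
            return r;       /* abort probe on invalid module parameters */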
 
 /**
@@ -1356,6 +1384,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
                else
                        chip_name = "raven";
                break;
+       case CHIP_NAVI10:
+               chip_name = "navi10";
+               break;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -1402,6 +1433,23 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
                adev->gfx.cu_info.max_scratch_slots_per_cu =
                        le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
                adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
+               if (hdr->version_minor >= 1) {
+                       const struct gpu_info_firmware_v1_1 *gpu_info_fw =
+                               (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
+                                                                       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       adev->gfx.config.num_sc_per_sh =
+                               le32_to_cpu(gpu_info_fw->num_sc_per_sh);
+                       adev->gfx.config.num_packer_per_sc =
+                               le32_to_cpu(gpu_info_fw->num_packer_per_sc);
+               }
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+               if (hdr->version_minor == 2) {
+                       const struct gpu_info_firmware_v1_2 *gpu_info_fw =
+                               (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
+                                                                       le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
+               }
+#endif
                break;
        }
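The gpu_info blob is versioned by appending fields, which is why the same payload can be re-cast to a newer struct once hdr->version_minor permits. A sketch of the layout convention assumed here (only the fields used above are shown):

    /* each minor revision embeds the previous one as its first member,
     * so the prefix of the blob stays layout-compatible */
    struct gpu_info_firmware_v1_1 {
            struct gpu_info_firmware_v1_0 v1_0;
            uint32_t num_sc_per_sh;
            uint32_t num_packer_per_sc;
    };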
        default:
@@ -1490,6 +1538,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                if (r)
                        return r;
                break;
+       case CHIP_NAVI10:
+               adev->family = AMDGPU_FAMILY_NV;
+
+               r = nv_set_ip_blocks(adev);
+               if (r)
+                       return r;
+               break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
@@ -1505,6 +1560,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return -EAGAIN;
+
+               /* query the reg access mode at the very beginning */
+               amdgpu_virt_init_reg_access_mode(adev);
        }
 
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -1532,6 +1590,19 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                                adev->ip_blocks[i].status.valid = true;
                        }
                }
+               /* get the vbios after the asic_funcs are set up */
+               if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+                       /* Read BIOS */
+                       if (!amdgpu_get_bios(adev))
+                               return -EINVAL;
+
+                       r = amdgpu_atombios_init(adev);
+                       if (r) {
+                               dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+                               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+                               return r;
+                       }
+               }
        }
 
        adev->cg_flags &= amdgpu_cg_mask;
@@ -1550,6 +1621,7 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
                if (adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+                   (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
                        r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                        if (r) {
@@ -1670,7 +1742,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                        adev->ip_blocks[i].status.hw = true;
 
                        /* right after GMC hw init, we create CSA */
-                       if (amdgpu_sriov_vf(adev)) {
+                       if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
                                                                AMDGPU_GEM_DOMAIN_VRAM,
                                                                AMDGPU_CSA_SIZE);
@@ -1821,6 +1893,43 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
        return 0;
 }
 
+static int amdgpu_device_enable_mgpu_fan_boost(void)
+{
+       struct amdgpu_gpu_instance *gpu_ins;
+       struct amdgpu_device *adev;
+       int i, ret = 0;
+
+       mutex_lock(&mgpu_info.mutex);
+
+       /*
+        * MGPU fan boost feature should be enabled
+        * only when there are two or more dGPUs in
+        * the system
+        */
+       if (mgpu_info.num_dgpu < 2)
+               goto out;
+
+       for (i = 0; i < mgpu_info.num_dgpu; i++) {
+               gpu_ins = &(mgpu_info.gpu_ins[i]);
+               adev = gpu_ins->adev;
+               if (!(adev->flags & AMD_IS_APU) &&
+                   !gpu_ins->mgpu_fan_enabled &&
+                   adev->powerplay.pp_funcs &&
+                   adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+                       ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
+                       if (ret)
+                               break;
+
+                       gpu_ins->mgpu_fan_enabled = 1;
+               }
+       }
+
+out:
+       mutex_unlock(&mgpu_info.mutex);
+
+       return ret;
+}
+
 /**
  * amdgpu_device_ip_late_init - run late init for hardware IPs
  *
@@ -1854,11 +1963,15 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
 
-       queue_delayed_work(system_wq, &adev->late_init_work,
-                          msecs_to_jiffies(AMDGPU_RESUME_MS));
-
        amdgpu_device_fill_reset_magic(adev);
 
+       r = amdgpu_device_enable_mgpu_fan_boost();
+       if (r)
+               DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+       /* set to low pstate by default */
+       amdgpu_xgmi_set_pstate(adev, 0);
+
        return 0;
 }
 
@@ -1957,65 +2070,20 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        return 0;
 }
 
-static int amdgpu_device_enable_mgpu_fan_boost(void)
-{
-       struct amdgpu_gpu_instance *gpu_ins;
-       struct amdgpu_device *adev;
-       int i, ret = 0;
-
-       mutex_lock(&mgpu_info.mutex);
-
-       /*
-        * MGPU fan boost feature should be enabled
-        * only when there are two or more dGPUs in
-        * the system
-        */
-       if (mgpu_info.num_dgpu < 2)
-               goto out;
-
-       for (i = 0; i < mgpu_info.num_dgpu; i++) {
-               gpu_ins = &(mgpu_info.gpu_ins[i]);
-               adev = gpu_ins->adev;
-               if (!(adev->flags & AMD_IS_APU) &&
-                   !gpu_ins->mgpu_fan_enabled &&
-                   adev->powerplay.pp_funcs &&
-                   adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
-                       ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
-                       if (ret)
-                               break;
-
-                       gpu_ins->mgpu_fan_enabled = 1;
-               }
-       }
-
-out:
-       mutex_unlock(&mgpu_info.mutex);
-
-       return ret;
-}
-
 /**
- * amdgpu_device_ip_late_init_func_handler - work handler for ib test
+ * amdgpu_device_delayed_init_work_handler - work handler for IB tests
  *
  * @work: work_struct.
  */
-static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
+static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
-               container_of(work, struct amdgpu_device, late_init_work.work);
+               container_of(work, struct amdgpu_device, delayed_init_work.work);
        int r;
 
        r = amdgpu_ib_ring_tests(adev);
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
-
-       r = amdgpu_device_enable_mgpu_fan_boost();
-       if (r)
-               DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
-
-       /*set to low pstate by default */
-       amdgpu_xgmi_set_pstate(adev, 0);
-
 }
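The renamed handler now only runs the IB ring tests; the fan-boost and XGMI-pstate calls moved into amdgpu_device_ip_late_init() above. For orientation, the full lifecycle of this work item as wired up elsewhere in this patch:

    INIT_DELAYED_WORK(&adev->delayed_init_work,
                      amdgpu_device_delayed_init_work_handler); /* probe          */
    queue_delayed_work(system_wq, &adev->delayed_init_work,
                       msecs_to_jiffies(AMDGPU_RESUME_MS));     /* init / resume  */
    flush_delayed_work(&adev->delayed_init_work);               /* before IB use  */
    cancel_delayed_work_sync(&adev->delayed_init_work);         /* suspend / fini */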
 
 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2355,6 +2423,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        case CHIP_VEGA20:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case CHIP_RAVEN:
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
+       case CHIP_NAVI10:
 #endif
                return amdgpu_dc != 0;
 #endif
@@ -2466,8 +2537,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        hash_init(adev->mn_hash);
        mutex_init(&adev->lock_reset);
        mutex_init(&adev->virt.dpm_mutex);
+       mutex_init(&adev->psp.mutex);
 
-       amdgpu_device_check_arguments(adev);
+       r = amdgpu_device_check_arguments(adev);
+       if (r)
+               return r;
 
        spin_lock_init(&adev->mmio_idx_lock);
        spin_lock_init(&adev->smc_idx_lock);
@@ -2485,8 +2559,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&adev->ring_lru_list);
        spin_lock_init(&adev->ring_lru_list_lock);
 
-       INIT_DELAYED_WORK(&adev->late_init_work,
-                         amdgpu_device_ip_late_init_func_handler);
+       INIT_DELAYED_WORK(&adev->delayed_init_work,
+                         amdgpu_device_delayed_init_work_handler);
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
                          amdgpu_device_delay_enable_gfx_off);
 
@@ -2523,8 +2597,33 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (adev->rio_mem == NULL)
                DRM_INFO("PCI I/O BAR is not found.\n");
 
+       /* enable PCIE atomic ops */
+       r = pci_enable_atomic_ops_to_root(adev->pdev,
+                                         PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+                                         PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+       if (r) {
+               adev->have_atomics_support = false;
+               DRM_INFO("PCIE atomic ops are not supported\n");
+       } else {
+               adev->have_atomics_support = true;
+       }
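pci_enable_atomic_ops_to_root() succeeds only when every bridge between the device and the root port supports AtomicOp routing, so the result is cached in adev->have_atomics_support rather than re-walking the hierarchy later. A hypothetical consumer sketch (the error code and comment are illustrative):

    /* e.g. an engine or queue type that needs 32/64-bit PCIe atomics */
    if (!adev->have_atomics_support)
            return -EINVAL; /* fall back to a non-atomic path */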
+
        amdgpu_device_get_pcie_info(adev);
 
+       if (amdgpu_mcbp)
+               DRM_INFO("MCBP is enabled\n");
+
+       if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
+               adev->enable_mes = true;
+
+       if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
+               r = amdgpu_discovery_init(adev);
+               if (r) {
+                       dev_err(adev->dev, "amdgpu_discovery_init failed\n");
+                       return r;
+               }
+       }
+
        /* early init functions */
        r = amdgpu_device_ip_early_init(adev);
        if (r)
@@ -2552,19 +2651,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                goto fence_driver_init;
        }
 
-       /* Read BIOS */
-       if (!amdgpu_get_bios(adev)) {
-               r = -EINVAL;
-               goto failed;
-       }
-
-       r = amdgpu_atombios_init(adev);
-       if (r) {
-               dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
-               goto failed;
-       }
-
        /* detect if we are with an SRIOV vbios */
        amdgpu_device_detect_sriov_bios(adev);
 
@@ -2662,10 +2748,17 @@ fence_driver_init:
 
        amdgpu_fbdev_init(adev);
 
+       if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
+               amdgpu_pm_virt_sysfs_init(adev);
+
        r = amdgpu_pm_sysfs_init(adev);
        if (r)
                DRM_ERROR("registering pm debugfs failed (%d).\n", r);
 
+       r = amdgpu_ucode_sysfs_init(adev);
+       if (r)
+               DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+
        r = amdgpu_debugfs_gem_init(adev);
        if (r)
                DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2706,7 +2799,22 @@ fence_driver_init:
        }
 
        /* must succeed. */
-       amdgpu_ras_post_init(adev);
+       amdgpu_ras_resume(adev);
+
+       queue_delayed_work(system_wq, &adev->delayed_init_work,
+                          msecs_to_jiffies(AMDGPU_RESUME_MS));
+
+       r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+       if (r) {
+               dev_err(adev->dev, "Could not create pcie_replay_count");
+               return r;
+       }
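Once registered, the attribute appears under the PCI device's sysfs directory. A userspace sketch for reading it (the card index varies per system):

    #include <stdio.h>

    int main(void)
    {
            /* path assumes the amdgpu device is card0 */
            FILE *f = fopen("/sys/class/drm/card0/device/pcie_replay_count", "r");
            unsigned long long replays = 0;

            if (f && fscanf(f, "%llu", &replays) == 1)
                    printf("PCIe replays: %llu\n", replays);
            if (f)
                    fclose(f);
            return 0;
    }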
+
+       if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
+               r = amdgpu_pmu_init(adev);
+               if (r)
+                       dev_err(adev->dev, "amdgpu_pmu_init failed\n");
+       }
 
        return 0;
 
@@ -2749,7 +2856,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
                adev->firmware.gpu_info_fw = NULL;
        }
        adev->accel_working = false;
-       cancel_delayed_work_sync(&adev->late_init_work);
+       cancel_delayed_work_sync(&adev->delayed_init_work);
        /* free i2c buses */
        if (!amdgpu_device_has_dc_support(adev))
                amdgpu_i2c_fini(adev);
@@ -2770,7 +2877,17 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        iounmap(adev->rmmio);
        adev->rmmio = NULL;
        amdgpu_device_doorbell_fini(adev);
+       if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))
+               amdgpu_pm_virt_sysfs_fini(adev);
+
        amdgpu_debugfs_regs_cleanup(adev);
+       device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
+       amdgpu_ucode_sysfs_fini(adev);
+       if (IS_ENABLED(CONFIG_PERF_EVENTS))
+               amdgpu_pmu_fini(adev);
+       amdgpu_debugfs_preempt_cleanup(adev);
+       if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+               amdgpu_discovery_fini(adev);
 }
 
 
@@ -2810,7 +2927,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
        if (fbcon)
                amdgpu_fbdev_set_suspend(adev, 1);
 
-       cancel_delayed_work_sync(&adev->late_init_work);
+       cancel_delayed_work_sync(&adev->delayed_init_work);
 
        if (!amdgpu_device_has_dc_support(adev)) {
                /* turn off display hw */
@@ -2851,6 +2968,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 
        amdgpu_amdkfd_suspend(adev);
 
+       amdgpu_ras_suspend(adev);
+
        r = amdgpu_device_ip_suspend_phase1(adev);
 
        /* evict vram memory */
@@ -2928,6 +3047,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
        if (r)
                return r;
 
+       queue_delayed_work(system_wq, &adev->delayed_init_work,
+                          msecs_to_jiffies(AMDGPU_RESUME_MS));
+
        if (!amdgpu_device_has_dc_support(adev)) {
                /* pin cursors */
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2951,7 +3073,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
                return r;
 
        /* Make sure IB tests flushed */
-       flush_delayed_work(&adev->late_init_work);
+       flush_delayed_work(&adev->delayed_init_work);
 
        /* blat the mode back in */
        if (fbcon) {
@@ -2971,6 +3093,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 
        drm_kms_helper_poll_enable(dev);
 
+       amdgpu_ras_resume(adev);
+
        /*
         * Most of the connector probing functions try to acquire runtime pm
         * refs to ensure that the GPU is powered on when connector polling is
@@ -3335,8 +3459,6 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                if (!ring || !ring->sched.thread)
                        continue;
 
-               drm_sched_stop(&ring->sched);
-
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
        }
@@ -3344,8 +3466,7 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        if(job)
                drm_sched_increase_karma(&job->base);
 
-
-
+       /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
        if (!amdgpu_sriov_vf(adev)) {
 
                if (!need_full_reset)
@@ -3452,6 +3573,19 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                if (vram_lost)
                                        amdgpu_device_fill_reset_magic(tmp_adev);
 
+                               /*
+                                * Add this ASIC back to the tracked list since
+                                * the reset completed successfully.
+                                */
+                               amdgpu_register_gpu_instance(tmp_adev);
+
+                               r = amdgpu_device_ip_late_init(tmp_adev);
+                               if (r)
+                                       goto out;
+
+                               /* must succeed. */
+                               amdgpu_ras_resume(tmp_adev);
+
                                /* Update PSP FW topology after reset */
                                if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
                                        r = amdgpu_xgmi_update_topology(hive, tmp_adev);
@@ -3483,38 +3617,21 @@ end:
        return r;
 }
 
-static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
-                                         struct amdgpu_job *job)
+static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock)
 {
-       int i;
+       if (trylock) {
+               if (!mutex_trylock(&adev->lock_reset))
+                       return false;
+       } else
+               mutex_lock(&adev->lock_reset);
 
-       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-               struct amdgpu_ring *ring = adev->rings[i];
-
-               if (!ring || !ring->sched.thread)
-                       continue;
-
-               if (!adev->asic_reset_res)
-                       drm_sched_resubmit_jobs(&ring->sched);
-
-               drm_sched_start(&ring->sched, !adev->asic_reset_res);
-       }
-
-       if (!amdgpu_device_has_dc_support(adev)) {
-               drm_helper_resume_force_mode(adev->ddev);
-       }
-
-       adev->asic_reset_res = 0;
-}
-
-static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
-{
-       mutex_lock(&adev->lock_reset);
        atomic_inc(&adev->gpu_reset_counter);
        adev->in_gpu_reset = 1;
        /* Block kfd: SRIOV would do it separately */
        if (!amdgpu_sriov_vf(adev))
                 amdgpu_amdkfd_pre_reset(adev);
+
+       return true;
 }
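mutex_trylock() returns nonzero only if the lock was actually taken, so callers passing trylock=true must be prepared to bail out. That is exactly how the recovery path below uses it (paraphrased):

    /* trylock unless this device is in an XGMI hive, where the hive
     * reset_lock already serializes concurrent resets */
    if (!amdgpu_device_lock_adev(adev, !hive))
            return 0;       /* another reset is already in flight */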
 
 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
@@ -3542,40 +3659,44 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
-       int r;
+       struct list_head device_list, *device_list_handle =  NULL;
+       bool need_full_reset, job_signaled;
        struct amdgpu_hive_info *hive = NULL;
-       bool need_full_reset = false;
        struct amdgpu_device *tmp_adev = NULL;
-       struct list_head device_list, *device_list_handle =  NULL;
+       int i, r = 0;
 
+       need_full_reset = job_signaled = false;
        INIT_LIST_HEAD(&device_list);
 
        dev_info(adev->dev, "GPU reset begin!\n");
 
+       cancel_delayed_work_sync(&adev->delayed_init_work);
+
+       hive = amdgpu_get_xgmi_hive(adev, false);
+
        /*
-        * In case of XGMI hive disallow concurrent resets to be triggered
-        * by different nodes. No point also since the one node already executing
-        * reset will also reset all the other nodes in the hive.
+        * Here we trylock to avoid a chain of resets executing, triggered
+        * either by jobs on different adevs in an XGMI hive or by jobs on
+        * different schedulers for the same device, while this TO handler
+        * is running. We always reset all schedulers for a device and all
+        * devices in an XGMI hive, so that should take care of them too.
         */
-       hive = amdgpu_get_xgmi_hive(adev, 0);
-       if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
-           !mutex_trylock(&hive->reset_lock))
+
+       if (hive && !mutex_trylock(&hive->reset_lock)) {
+               DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
+                        job->base.id, hive->hive_id);
                return 0;
+       }
 
        /* Start with adev pre asic reset first for soft reset check.*/
-       amdgpu_device_lock_adev(adev);
-       r = amdgpu_device_pre_asic_reset(adev,
-                                        job,
-                                        &need_full_reset);
-       if (r) {
-               /*TODO Should we stop ?*/
-               DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
-                         r, adev->ddev->unique);
-               adev->asic_reset_res = r;
+       if (!amdgpu_device_lock_adev(adev, !hive)) {
+               DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+                                        job->base.id);
+               return 0;
        }
 
        /* Build list of devices to reset */
-       if  (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+       if  (adev->gmc.xgmi.num_physical_nodes > 1) {
                if (!hive) {
                        amdgpu_device_unlock_adev(adev);
                        return -ENODEV;
@@ -3592,13 +3713,67 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                device_list_handle = &device_list;
        }
 
+       /*
+        * Mark the ASICs to be reset as untracked first,
+        * and add them back after the reset completes.
+        */
+       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head)
+               amdgpu_unregister_gpu_instance(tmp_adev);
+
+       /* block all schedulers and reset given job's ring */
+       list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+               /* disable ras on ALL IPs */
+               if (amdgpu_device_ip_need_full_reset(tmp_adev))
+                       amdgpu_ras_suspend(tmp_adev);
+
+               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                       struct amdgpu_ring *ring = tmp_adev->rings[i];
+
+                       if (!ring || !ring->sched.thread)
+                               continue;
+
+                       drm_sched_stop(&ring->sched, &job->base);
+               }
+       }
+
+
+       /*
+        * Must check the guilty job's fence here, since past this point
+        * all old HW fences are force signaled.
+        *
+        * job->base holds a reference to the parent fence.
+        */
+       if (job && job->base.s_fence->parent &&
+           dma_fence_is_signaled(job->base.s_fence->parent))
+               job_signaled = true;
+
+       if (!amdgpu_device_ip_need_full_reset(adev))
+               device_list_handle = &device_list;
+
+       if (job_signaled) {
+               dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+               goto skip_hw_reset;
+       }
+
+
+       /* Guilty job will be freed after this */
+       r = amdgpu_device_pre_asic_reset(adev,
+                                        job,
+                                        &need_full_reset);
+       if (r) {
+               /*TODO Should we stop ?*/
+               DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
+                         r, adev->ddev->unique);
+               adev->asic_reset_res = r;
+       }
+
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 
                if (tmp_adev == adev)
                        continue;
 
-               amdgpu_device_lock_adev(tmp_adev);
+               amdgpu_device_lock_adev(tmp_adev, false);
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 NULL,
                                                 &need_full_reset);
@@ -3622,9 +3797,28 @@ retry:   /* Rest of adevs pre asic reset from XGMI hive. */
                        goto retry;
        }
 
+skip_hw_reset:
+
        /* Post ASIC reset for all devs .*/
        list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-               amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+               for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+                       struct amdgpu_ring *ring = tmp_adev->rings[i];
+
+                       if (!ring || !ring->sched.thread)
+                               continue;
+
+                       /* No point in resubmitting jobs if we didn't HW reset */
+                       if (!tmp_adev->asic_reset_res && !job_signaled)
+                               drm_sched_resubmit_jobs(&ring->sched);
+
+                       drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
+               }
+
+               if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+                       drm_helper_resume_force_mode(tmp_adev->ddev);
+               }
+
+               tmp_adev->asic_reset_res = 0;
 
                if (r) {
                        /* bad news, how to tell it to userspace ? */
@@ -3637,7 +3831,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                amdgpu_device_unlock_adev(tmp_adev);
        }
 
-       if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+       if (hive)
                mutex_unlock(&hive->reset_lock);
 
        if (r)
@@ -3645,43 +3839,6 @@ retry:   /* Rest of adevs pre asic reset from XGMI hive. */
        return r;
 }
 
-static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
-                                                 enum pci_bus_speed *speed,
-                                                 enum pcie_link_width *width)
-{
-       struct pci_dev *pdev = adev->pdev;
-       enum pci_bus_speed cur_speed;
-       enum pcie_link_width cur_width;
-       u32 ret = 1;
-
-       *speed = PCI_SPEED_UNKNOWN;
-       *width = PCIE_LNK_WIDTH_UNKNOWN;
-
-       while (pdev) {
-               cur_speed = pcie_get_speed_cap(pdev);
-               cur_width = pcie_get_width_cap(pdev);
-               ret = pcie_bandwidth_available(adev->pdev, NULL,
-                                                      NULL, &cur_width);
-               if (!ret)
-                       cur_width = PCIE_LNK_WIDTH_RESRV;
-
-               if (cur_speed != PCI_SPEED_UNKNOWN) {
-                       if (*speed == PCI_SPEED_UNKNOWN)
-                               *speed = cur_speed;
-                       else if (cur_speed < *speed)
-                               *speed = cur_speed;
-               }
-
-               if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
-                       if (*width == PCIE_LNK_WIDTH_UNKNOWN)
-                               *width = cur_width;
-                       else if (cur_width < *width)
-                               *width = cur_width;
-               }
-               pdev = pci_upstream_bridge(pdev);
-       }
-}
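The removed helper duplicated logic the PCI core already provides: pcie_bandwidth_available() itself walks the upstream bridges and reports the limiting link speed and width, which is what the replacement call below relies on. For reference, the core signature:

    /* returns the available bandwidth in Mb/s */
    u32 pcie_bandwidth_available(struct pci_dev *dev,
                                 struct pci_dev **limiting_dev,
                                 enum pci_bus_speed *speed,
                                 enum pcie_link_width *width);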
-
 /**
  * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
  *
@@ -3715,8 +3872,8 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
        if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
                return;
 
-       amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap,
-                                             &platform_link_width);
+       pcie_bandwidth_available(adev->pdev, NULL,
+                                &platform_speed_cap, &platform_link_width);
 
        if (adev->pm.pcie_gen_mask == 0) {
                /* asic caps */