drm/connector: Allow max possible encoders to attach to a connector
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 2471e7cf75eac4cd565b1e0b070f862dc3b9c444..235548c0b41f0e87758a9c815ee7e3c5f02ed64f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -26,6 +26,7 @@
 #include "nbio/nbio_6_1_sh_mask.h"
 #include "gc/gc_9_0_offset.h"
 #include "gc/gc_9_0_sh_mask.h"
+#include "mp/mp_9_0_offset.h"
 #include "soc15.h"
 #include "vega10_ih.h"
 #include "soc15_common.h"
@@ -343,7 +344,7 @@ flr_done:
 
        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
-               && amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
+               && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
                amdgpu_device_gpu_recover(adev, NULL);
 }
 
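Context for the hunk above: the module-wide amdgpu_lockup_timeout value was replaced by per-engine timeout fields on struct amdgpu_device, so the "TDR disabled" test now reads the device's SDMA timeout. A minimal sketch of the idea, assuming (not shown in this patch) that sibling fields such as gfx_timeout exist alongside sdma_timeout and also hold MAX_SCHEDULE_TIMEOUT when job timeouts are off; the helper name is hypothetical:

	/* Sketch only, not part of this patch: with per-engine timeouts the
	 * "no TDR configured" condition is tracked per device, not per module.
	 */
	static bool xgpu_ai_tdr_disabled(struct amdgpu_device *adev)
	{
		return adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT &&
		       adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT;
	}
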
@@ -448,6 +449,20 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 }
 
+static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
+{
+       adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
+
+       /* Enable L1 security reg access mode by default, as non-security VFs
+        * will no longer be supported.
+        */
+       adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;
+
+       adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;
+
+       adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
+}
+
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
        .req_full_gpu   = xgpu_ai_request_full_gpu_access,
        .rel_full_gpu   = xgpu_ai_release_full_gpu_access,
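The new xgpu_ai_init_reg_access_mode() above only records capability bits in adev->virt.reg_access_mode. A sketch of how such flags would typically be consumed by register programming paths; the predicate names are hypothetical and not part of this patch:

	/* Illustrative predicates: callers would take the RLC or PSP-programmed
	 * path when the corresponding access-mode bit was set at early init,
	 * and fall back to legacy MMIO access otherwise.
	 */
	static bool virt_can_use_rlc_prg(struct amdgpu_device *adev)
	{
		return !!(adev->virt.reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC);
	}

	static bool virt_psp_programs_ih(struct amdgpu_device *adev)
	{
		return !!(adev->virt.reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH);
	}
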
@@ -456,4 +471,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
        .trans_msg = xgpu_ai_mailbox_trans_msg,
        .get_pp_clk = xgpu_ai_get_pp_clk,
        .force_dpm_level = xgpu_ai_force_dpm_level,
+       .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
 };
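
The init_reg_access_mode entry added to xgpu_ai_virt_ops is a function pointer like the existing hooks. A hedged sketch of how core virt code could invoke it once the ops table is installed for the VF; the wrapper name and fallback are illustrative, only adev->virt.ops and the hook itself come from this patch:

	/* Illustrative call site, not from this patch: run the ASIC-specific
	 * hook if present, otherwise keep the legacy access mode.
	 */
	void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
	{
		if (adev->virt.ops && adev->virt.ops->init_reg_access_mode)
			adev->virt.ops->init_reg_access_mode(adev);
		else
			adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;
	}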