drm/amdgpu: enable UMSCH 4.0.6
authorLang Yu <Lang.Yu@amd.com>
Thu, 7 Mar 2024 05:57:06 +0000 (13:57 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 27 Mar 2024 13:30:05 +0000 (09:30 -0400)
Share the same code path with 4.0.5 and enable collaborate mode for VPE.

Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Reviewed-by: Veerabadhran Gopalakrishnan <Veerabadhran.Gopalakrishnan@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c

index a07e4b87d4cae01af53ef4cfded4cc4051dbd1c7..fdd36fb027ab6aa04b31c790af80596bb7da0427 100644 (file)
@@ -2237,6 +2237,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
 {
        switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
        case IP_VERSION(4, 0, 5):
+       case IP_VERSION(4, 0, 6):
                if (amdgpu_umsch_mm & 0x1) {
                        amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
                        adev->enable_umsch_mm = true;
index ab820cf526683b5a15ef94ee28075f20752baeac..0df97c3e3a700dccc7bc84d4688e1dc8e1b8a1e2 100644 (file)
@@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev,
        mqd->rptr_val = 0;
        mqd->unmapped = 1;
 
+       if (adev->vpe.collaborate_mode)
+               memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO));
+
        qinfo->mqd_addr = test->mqd_data_gpu_addr;
        qinfo->csa_addr = test->ctx_data_gpu_addr +
                offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa);
-       qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1;
+       qinfo->doorbell_offset_0 = 0;
        qinfo->doorbell_offset_1 = 0;
 }
 
@@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te
        ring[5] = 0;
 
        mqd->wptr_val = (6 << 2);
-       // WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
+       if (adev->vpe.collaborate_mode)
+               (++mqd)->wptr_val = (6 << 2);
+
+       WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val);
 
        for (i = 0; i < adev->usec_timeout; i++) {
                if (*fence == test_pattern)
@@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
 
        switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
        case IP_VERSION(4, 0, 5):
+       case IP_VERSION(4, 0, 6):
                fw_name = "amdgpu/umsch_mm_4_0_0.bin";
                break;
        default:
@@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle)
 
        switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
        case IP_VERSION(4, 0, 5):
+       case IP_VERSION(4, 0, 6):
                umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
                break;
        default:
index 8e7b763cfdb7ef72ba8c98ea4e33515ffa4e8d81..84368cf1e17535c16c031ce6677f53769f9e8f94 100644 (file)
@@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch)
 
        umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr;
 
-       if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+       if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
                WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
                        1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
                SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -248,7 +248,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch)
        data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0);
        WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data);
 
-       if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) {
+       if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) {
                WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG,
                        2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT);
                SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS,
@@ -271,6 +271,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch)
 
        set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn;
        set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe;
+       set_hw_resources.collaboration_mask_vpe =
+               adev->vpe.collaborate_mode ? 0x3 : 0x0;
        set_hw_resources.engine_mask = umsch->engine_mask;
 
        set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask;
@@ -346,6 +348,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch,
        add_queue.h_queue = input_ptr->h_queue;
        add_queue.vm_context_cntl = input_ptr->vm_context_cntl;
        add_queue.is_context_suspended = input_ptr->is_context_suspended;
+       add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0;
 
        add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr;
        add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq;