Merge tag 'amd-drm-next-6.9-2024-02-09' of https://gitlab.freedesktop.org/agd5f/linux...
author Dave Airlie <airlied@redhat.com>
Tue, 13 Feb 2024 01:32:23 +0000 (11:32 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 13 Feb 2024 01:32:23 +0000 (11:32 +1000)
amd-drm-next-6.9-2024-02-09:

amdgpu:
- Validate DMABuf imports in compute VMs
- Add RAS ACA framework
- PSP 13 fixes
- Misc code cleanups
- Replay fixes
- Atom interpreter PS, WS bounds checking
- DML2 fixes
- Audio fixes
- DCN 3.5 Z state fixes
- Remove deprecated ida_simple usage
- UBSAN fixes
- RAS fixes
- Enable seq64 infrastructure
- DC color block enablement
- Documentation updates
- DC documentation updates
- DMCUB updates
- S3 fixes
- VCN 4.0.5 fixes
- DP MST fixes
- SR-IOV fixes

amdkfd:
- Validate DMABuf imports in compute VMs
- SVM fixes
- Trap handler updates

radeon:
- Atom interpreter PS, WS bounds checking
- Misc code cleanups

UAPI:
- Bump KFD version so UMDs know that the fixes enabling the management of
  VA mappings in compute VMs using the GEM_VA ioctl for DMABufs exported
  from KFD are present
- Add INFO query for input power.  This matches the existing INFO query for
  average power.  Used in gaming HUDs, etc. (see the usage sketch after this
  list)
  Example userspace: https://github.com/Umio-Yasuno/libdrm-amdgpu-sys-rs/tree/input_power
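
A minimal C sketch of the new input power query, following the same pattern
libdrm uses for the existing sensor queries (the render node path below is
hypothetical, and the value is assumed to be reported in watts like the
existing average power sensor):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <xf86drm.h>
    #include <libdrm/amdgpu_drm.h>

    int main(void)
    {
        /* hypothetical render node; pick the one backing your amdgpu device */
        int fd = open("/dev/dri/renderD128", O_RDWR);
        struct drm_amdgpu_info request;
        uint32_t power = 0;

        if (fd < 0)
            return 1;

        memset(&request, 0, sizeof(request));
        request.return_pointer = (uintptr_t)&power;
        request.return_size = sizeof(power);
        request.query = AMDGPU_INFO_SENSOR;
        request.sensor_info.type = AMDGPU_INFO_SENSOR_GPU_INPUT_POWER;

        /* DRM_IOCTL_AMDGPU_INFO is write-only, so drmCommandWrite() is used */
        if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)) == 0)
            printf("input power: %u W\n", power);

        close(fd);
        return 0;
    }

Through libdrm, the same data should also be reachable via
amdgpu_query_sensor_info() once the new sensor type lands in the installed
headers.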

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240209221459.5453-1-alexander.deucher@amd.com
296 files changed:
Documentation/gpu/amdgpu/dgpu-asic-info-table.csv
Documentation/gpu/amdgpu/display/dcn-blocks.rst [new file with mode: 0644]
Documentation/gpu/amdgpu/display/display-contributing.rst [new file with mode: 0644]
Documentation/gpu/amdgpu/display/display-manager.rst
Documentation/gpu/amdgpu/display/index.rst
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/atom.h
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
drivers/gpu/drm/amd/amdgpu/clearstate_si.h
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
drivers/gpu/drm/amd/amdgpu/umc_v6_0.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.h
drivers/gpu/drm/amd/amdkfd/kfd_debug.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/TODO [deleted file]
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
drivers/gpu/drm/amd/display/dc/basics/conversion.c
drivers/gpu/drm/amd/display/dc/basics/conversion.h
drivers/gpu/drm/amd/display/dc/bios/command_table.c
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_state.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c
drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
drivers/gpu/drm/amd/display/dc/link/link_detection.c
drivers/gpu/drm/amd/display/dc/link/link_dpms.c
drivers/gpu/drm/amd/display/dc/link/link_validation.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/audio_types.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/arct_ip_offset.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h
drivers/gpu/drm/amd/include/atom-bits.h
drivers/gpu/drm/amd/include/beige_goby_ip_offset.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h
drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h
drivers/gpu/drm/amd/include/dm_pp_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/include/navi12_ip_offset.h
drivers/gpu/drm/amd/include/navi14_ip_offset.h
drivers/gpu/drm/amd/include/pptable.h
drivers/gpu/drm/amd/include/renoir_ip_offset.h
drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h
drivers/gpu/drm/amd/include/v10_structs.h
drivers/gpu/drm/amd/include/vangogh_ip_offset.h
drivers/gpu/drm/amd/include/vega10_ip_offset.h
drivers/gpu/drm/amd/include/vega20_ip_offset.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/radeon/atom-bits.h
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/atom.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/atombios_i2c.c
drivers/gpu/drm/radeon/btc_dpm.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/ci_dpm.h
drivers/gpu/drm/radeon/clearstate_cayman.h
drivers/gpu/drm/radeon/clearstate_ci.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreen_smc.h
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/kv_smc.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/ni_dpm.h
drivers/gpu/drm/radeon/nislands_smc.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300_reg.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_dpm.c
drivers/gpu/drm/radeon/r600_dpm.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_audio.h
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv6xx_dpm.h
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/rv770_smc.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/si_dpm.h
drivers/gpu/drm/radeon/smu7.h
drivers/gpu/drm/radeon/smu7_discrete.h
drivers/gpu/drm/radeon/smu7_fusion.h
drivers/gpu/drm/radeon/sumo_dpm.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/radeon/trinity_dpm.h
drivers/gpu/drm/radeon/uvd_v1_0.c
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/kfd_ioctl.h

index 882d2518f8ed26526016f770d97cc168f4597e73..3825f00ca9fe84d7bce332efda1a350ba1ed6b07 100644 (file)
@@ -16,6 +16,7 @@ Radeon (RX|TM) (PRO|WX) Vega /MI25 /V320 /V340L /8200 /9100 /SSG MxGPU, VEGA10,
 AMD Radeon (Pro) VII /MI50 /MI60, VEGA20, DCE 12, 9.4.0, VCE 4.1.0 / UVD 7.2.0, 4.2.0
 MI100, ARCTURUS, *, 9.4.1, VCN 2.5.0, 4.2.2
 MI200, ALDEBARAN, *, 9.4.2, VCN 2.6.0, 4.4.0
+MI300, AQUA_VANJARAM, *, 9.4.3, VCN 4.0.3, 4.4.2
 AMD Radeon (RX|Pro) 5600(M|XT) /5700 (M|XT|XTB) /W5700, NAVI10, DCN 2.0.0, 10.1.10, VCN 2.0.0, 5.0.0
 AMD Radeon (Pro) 5300 /5500XTB/5500(XT|M) /W5500M /W5500, NAVI14, DCN 2.0.0, 10.1.1, VCN 2.0.2, 5.0.2
 AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN 3.0.0, 5.2.0
@@ -23,4 +24,5 @@ AMD Radeon RX 6700 XT / 6800M / 6700M, NAVY_FLOUNDER, DCN 3.0.0, 10.3.2, VCN 3.0
 AMD Radeon RX 6600(XT) /6600M /W6600 /W6600M, DIMGREY_CAVEFISH, DCN 3.0.2, 10.3.4, VCN 3.0.16, 5.2.4
 AMD Radeon RX 6500M /6300M /W6500M /W6300M, BEIGE_GOBY, DCN 3.0.3, 10.3.5, VCN 3.0.33, 5.2.5
 AMD Radeon RX 7900 XT /XTX, , DCN 3.2.0, 11.0.0, VCN 4.0.0, 6.0.0
+AMD Radeon RX 7800 XT, , DCN 3.2.0, 11.0.3, VCN 4.0.0, 6.0.3
 AMD Radeon RX 7600M (XT) /7700S /7600S, , DCN 3.2.1, 11.0.2, VCN 4.0.4, 6.0.2
diff --git a/Documentation/gpu/amdgpu/display/dcn-blocks.rst b/Documentation/gpu/amdgpu/display/dcn-blocks.rst
new file mode 100644 (file)
index 0000000..a3fbd3e
--- /dev/null
@@ -0,0 +1,78 @@
+==========
+DCN Blocks
+==========
+
+In this section, you will find some extra details about some of the DCN
+blocks, together with their automatically generated code documentation.
+
+DCHUBBUB
+--------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+   :internal:
+
+HUBP
+----
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+   :internal:
+
+DPP
+---
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+   :internal:
+
+MPC
+---
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+   :internal:
+
+OPP
+---
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+   :internal:
+
+DIO
+---
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+   :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+   :internal:
diff --git a/Documentation/gpu/amdgpu/display/display-contributing.rst b/Documentation/gpu/amdgpu/display/display-contributing.rst
new file mode 100644 (file)
index 0000000..fdb2bea
--- /dev/null
@@ -0,0 +1,168 @@
+.. _display_todos:
+
+==============================
+AMDGPU - Display Contributions
+==============================
+
+First of all, if you are here, you probably want to give some technical
+contribution to the display code, and for that, we say thank you :)
+
+This page summarizes some of the issues you can help with; keep in mind that
+this is a static page, and it is always a good idea to try to reach developers
+on the amd-gfx mailing list or some of the maintainers. Finally, this page
+follows the DRM way of creating a TODO list; for more information, check
+'Documentation/gpu/todo.rst'.
+
+Gitlab issues
+=============
+
+Users can report issues associated with AMD GPUs at:
+
+- https://gitlab.freedesktop.org/drm/amd
+
+Usually, we try to add a proper label to all new tickets to make it easy to
+filter issues. If you can reproduce any problem, you could help by adding more
+information or fixing the issue.
+
+Level: diverse
+
+IGT
+===
+
+`IGT`_ provides many integration tests that can be run on your GPU. We always
+want to pass a large set of tests to increase the test coverage in our CI. If
+you wish to contribute to the display code but are unsure where a good place
+to start is, we recommend you run all IGT tests and try to fix any failure you
+see on your hardware. Keep in mind that a failure can be an IGT problem or a
+kernel issue; each case must be analyzed individually.
+
+Level: diverse
+
+.. _IGT: https://gitlab.freedesktop.org/drm/igt-gpu-tools
+
+Compilation
+===========
+
+Fix compilation warnings
+------------------------
+
+Enable the W=1 or W=2 warning level in the kernel compilation and try to fix
+the issues on the display side.
+
+Level: Starter
+
+Fix compilation issues when using um architecture
+-------------------------------------------------
+
+Linux has a User-mode Linux (UML) feature, and the kernel can be compiled for
+the **um** architecture. Compiling for **um** can bring multiple advantages
+from the test perspective. We currently have some compilation issues in this
+area that we need to fix.
+
+Level: Intermediate
+
+Code Refactor
+=============
+
+Add prefix to DC functions to improve the debug with ftrace
+-----------------------------------------------------------
+
+The Ftrace debug feature (check 'Documentation/trace/ftrace.rst') is a
+fantastic way to check the code path when developers try to make sense of a
+bug. Ftrace provides a filter mechanism that can be useful when the developer
+has some hunch of which part of the code can cause the issue; for this reason,
+if a set of functions has a proper prefix, it becomes easy to create a good
+filter. Additionally, prefixes can improve stack trace readability.
+
+The DC code does not follow some prefix rules, which makes the Ftrace filter
+more complicated and reduces the readability of the stack trace. If you want
+something simple to start contributing to the display, you can make patches for
+adding prefixes to DC functions. To create those prefixes, use part of the file
+name as a prefix for all functions in the target file. Check
+`amdgpu_dm_crtc.c` and `amdgpu_dm_plane.c` for some references. However, we
+strongly advise not to send huge patches changing these prefixes; otherwise, it
+will be hard to review and test, which can generate second thoughts from
+maintainers. Try small steps; in case of doubt, you can ask before you put in
+the effort. We recommend first looking at folders like dceXYZ, dcnXYZ, basics,
+bios, core, clk_mgr, hwss, resource, and irq.
+
+Level: Starter
+
+Reduce code duplication
+-----------------------
+
+AMD has an extensive portfolio with various dGPUs and APUs that amdgpu
+supports. To maintain the new hardware release cadence, DCE/DCN was designed
+in a modular way, making the bring-up for new hardware fast. Over the years,
+amdgpu accumulated some technical debt in the code duplication area. For this
+task, it would be a good idea to find a tool that can discover code duplication
+(including patterns) and use it as guidance to reduce duplications.
+
+Level: Intermediate
+
+Make atomic_commit_[check|tail] more readable
+---------------------------------------------
+
+The functions responsible for atomic commit and tail are intricate and
+extensive. In particular, `amdgpu_dm_atomic_commit_tail` is a long function and
+could benefit from being split into smaller helpers. Improvements in this area
+are more than welcome, but keep in mind that changes in this area will affect
+all ASICs, meaning that refactoring requires a comprehensive verification; in
+other words, this effort can take some time for validation.
+
+Level: Advanced
+
+Documentation
+=============
+
+Expand kernel-doc
+-----------------
+
+Many DC functions do not have a proper kernel-doc; understanding a function and
+adding documentation is a great way to learn more about the amdgpu driver and
+also leave an outstanding contribution to the entire community.
+
+Level: Starter
+
+Beyond AMDGPU
+=============
+
+AMDGPU provides features that are not yet enabled in userspace. This section
+highlights some of the coolest display features, which could be enabled with
+the help of userspace developers.
+
+Enable underlay
+---------------
+
+AMD display has a feature called underlay (which you can read more about in
+'Documentation/gpu/amdgpu/display/mpo-overview.rst') which is intended to
+save power when playing a video. The basic idea is to put the video in the
+underlay plane at the bottom and the desktop in the plane above it with a hole
+in the video area. This feature is enabled in ChromeOS, and from our power
+measurements, it can save power.
+
+Level: Unknown
+
+Adaptive Backlight Modulation (ABM)
+-----------------------------------
+
+ABM is a feature that adjusts the display panel's backlight level and pixel
+values depending on the displayed image. This power-saving feature can be very
+useful when the system starts to run off battery; since it impacts display
+output fidelity, it would be good if it were an option that users could turn
+on or off.
+
+Level: Unknown
+
+HDR & Color management & VRR
+----------------------------
+
+HDR, Color Management, and VRR are huge topics, and it is hard to put them into
+concise TODOs. If you are interested in this topic, we recommend checking some
+blog posts from the community developers to better understand some of the
+specific challenges and the people working on the subject. If anyone wants to
+work on some particular part, we can try to help with some basic guidance.
+Finally, keep in mind that we already have some kernel-doc in place for those
+areas.
+
+Level: Unknown
index be2651ecdd7f2ab8b1560444ac944ba2da1f7f68..67a811e6891fb3b0266bb253fd4c8997501fab8d 100644 (file)
@@ -131,9 +131,6 @@ The DRM blend mode and its elements are then mapped by AMDGPU display manager
 (DM) to program the blending configuration of the Multiple Pipe/Plane Combined
 (MPC), as follows:
 
-.. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
-   :doc: mpc-overview
-
 .. kernel-doc:: drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
    :functions: mpcc_blnd_cfg
 
index f8a4f53d70d8c6124100a4582b7342253315360b..f0c342e00a392bf5a57cec05c6dbd89a5d8c9f39 100644 (file)
@@ -7,18 +7,80 @@ drm/amd/display - Display Core (DC)
 AMD display engine is partially shared with other operating systems; for this
 reason, our Display Core Driver is divided into two pieces:
 
-1. **Display Core (DC)** contains the OS-agnostic components. Things like
+#. **Display Core (DC)** contains the OS-agnostic components. Things like
    hardware programming and resource management are handled here.
-2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
-   amdgpu base driver and DRM are implemented here.
+#. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+   amdgpu base driver and DRM are implemented here. For example, you can check
+   the display/amdgpu_dm/ folder.
+
+------------------
+DC Code validation
+------------------
+
+Maintaining the same code base across multiple OSes requires a lot of
+synchronization effort between repositories and exhaustive validation. In the
+DC case, we maintain a tree to centralize code from different parts. The shared
+repository has integration tests with our internal Linux CI farm, and we run a
+comprehensive set of IGT tests on various AMD GPUs/APUs (mostly recent dGPUs
+and APUs). Our CI also checks ARM64/32, PPC64/32, and x86_64/32 compilation
+with DCN enabled and disabled.
+
+When we upstream a new feature or some patches, we pack them in a patchset with
+the prefix **DC Patches for <DATE>**, which is created based on the latest
+`amd-staging-drm-next <https://gitlab.freedesktop.org/agd5f/linux>`_. All of
+those patches are tested under a DC version as follows:
+
+* Ensure that every patch compiles and that the entire series passes our set
+  of IGT tests on different hardware.
+* Prepare a branch with those patches for our validation team. If there is an
+  error, a developer will debug it as fast as possible; usually, a simple
+  bisect in the series is enough to point to a bad change, and two possible
+  actions emerge: fix the issue or drop the patch. If it is not an easy fix,
+  the bad patch is dropped.
+* Finally, developers wait a few days for community feedback before we merge
+  the series.
+
+It is good to stress that the test phase is something that we take extremely
+seriously, and we never merge anything that fails our validation. An overview
+of our test set follows:
+
+#. Manual test
+    * Multiple Hotplugs with DP and HDMI.
+    * Stress test with multiple display configuration changes via the user interface.
+    * Validate VRR behaviour.
+    * Check PSR.
+    * Validate MPO when playing video.
+    * Test more than two displays connected at the same time.
+    * Check suspend/resume.
+    * Validate FPO.
+    * Check MST.
+#. Automated test
+    * IGT tests in a farm with GPUs and APUs that support DCN and DCE.
+    * Compilation validation with the latest GCC and Clang from an LTS distro.
+    * Cross-compilation for PowerPC 64/32, ARM 64/32, and x86 32.
+
+In terms of test setup for CI and manual tests, we usually use:
+
+#. The latest Ubuntu LTS.
+#. In terms of userspace, we only use fully updated open-source components
+   provided by the distribution's official package manager.
+#. Regarding IGT, we use the latest code from upstream.
+#. Most of the manual tests are conducted in GNOME, but we also use KDE.
+
+Notice that someone from our test team will always reply to the cover letter
+with the test report.
+
+--------------
+DC Information
+--------------
 
 The display pipe is responsible for "scanning out" a rendered frame from the
 GPU memory (also called VRAM, FrameBuffer, etc.) to a display. In other words,
 it would:
 
-1. Read frame information from memory;
-2. Perform required transformation;
-3. Send pixel data to sink devices.
+#. Read frame information from memory;
+#. Perform required transformation;
+#. Send pixel data to sink devices.
 
 If you want to learn more about our driver details, take a look at the below
 table of content:
@@ -26,7 +88,9 @@ table of content:
 .. toctree::
 
    display-manager.rst
-   dc-debug.rst
    dcn-overview.rst
+   dcn-blocks.rst
    mpo-overview.rst
+   dc-debug.rst
+   display-contributing.rst
    dc-glossary.rst
index 260e32ef7bae0ff299bcbeaf9750e15d6cfacaff..4c989da4d2f368f532de504b5cd09cbfa08c3a6e 100644 (file)
@@ -80,7 +80,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
        amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
        amdgpu_fw_attestation.o amdgpu_securedisplay.o \
        amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
-       amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o
+       amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
 
index 3d8a48f46b015613dc44517ebd20d5250df5a3b1..312dfaec7b4a746f5568bc36caf510c6d70ee014 100644 (file)
 #include "amdgpu_smuio.h"
 #include "amdgpu_fdinfo.h"
 #include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xcp.h"
 #include "amdgpu_seq64.h"
 
 #define MAX_GPU_INSTANCE               64
 
-struct amdgpu_gpu_instance
-{
+struct amdgpu_gpu_instance {
        struct amdgpu_device            *adev;
        int                             mgpu_fan_enabled;
 };
 
-struct amdgpu_mgpu_info
-{
+struct amdgpu_mgpu_info {
        struct amdgpu_gpu_instance      gpu_ins[MAX_GPU_INSTANCE];
        struct mutex                    mutex;
        uint32_t                        num_gpu;
@@ -140,8 +139,7 @@ enum amdgpu_ss {
        AMDGPU_SS_DRV_UNLOAD
 };
 
-struct amdgpu_watchdog_timer
-{
+struct amdgpu_watchdog_timer {
        bool timeout_fatal_disable;
        uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
 };
@@ -1045,6 +1043,9 @@ struct amdgpu_device {
        /* MCA */
        struct amdgpu_mca               mca;
 
+       /* ACA */
+       struct amdgpu_aca               aca;
+
        struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
        uint32_t                        harvest_ip_mask;
        int                             num_ip_blocks;
@@ -1078,6 +1079,8 @@ struct amdgpu_device {
        bool                            in_s3;
        bool                            in_s4;
        bool                            in_s0ix;
+       /* indicate amdgpu suspension status */
+       bool                            suspend_complete;
 
        enum pp_mp1_state               mp1_state;
        struct amdgpu_doorbell_index doorbell_index;
@@ -1329,6 +1332,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define WREG32_FIELD_OFFSET(reg, offset, field, val)   \
        WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
 
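+/* extract the inclusive bit field [h:l] from register value x */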
+#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
 /*
  * BIOS helpers.
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
new file mode 100644 (file)
index 0000000..493982f
--- /dev/null
@@ -0,0 +1,879 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_aca.h"
+#include "amdgpu_ras.h"
+
+#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}
+
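+/* Callback type invoked once per matching bank by the dispatch helpers below;
+ * a non-zero return value aborts the walk.
+ */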
+typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data);
+
+struct aca_banks {
+       int nr_banks;
+       struct list_head list;
+};
+
+struct aca_hwip {
+       int hwid;
+       int mcatype;
+};
+
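+/* hwid/mcatype pairs used to match a bank's IPID register to a HW IP block */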
+static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
+       ACA_BANK_HWID(SMU,      0x01,   0x01),
+       ACA_BANK_HWID(PCS_XGMI, 0x50,   0x00),
+       ACA_BANK_HWID(UMC,      0x96,   0x00),
+};
+
+static void aca_banks_init(struct aca_banks *banks)
+{
+       if (!banks)
+               return;
+
+       memset(banks, 0, sizeof(*banks));
+       INIT_LIST_HEAD(&banks->list);
+}
+
+static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank)
+{
+       struct aca_bank_node *node;
+
+       if (!bank)
+               return -EINVAL;
+
+       node = kvzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       memcpy(&node->bank, bank, sizeof(*bank));
+
+       INIT_LIST_HEAD(&node->node);
+       list_add_tail(&node->node, &banks->list);
+
+       banks->nr_banks++;
+
+       return 0;
+}
+
+static void aca_banks_release(struct aca_banks *banks)
+{
+       struct aca_bank_node *node, *tmp;
+
+       list_for_each_entry_safe(node, tmp, &banks->list, node) {
+               list_del(&node->node);
+               kvfree(node);
+       }
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+       const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+       if (!count)
+               return -EINVAL;
+
+       if (!smu_funcs || !smu_funcs->get_valid_aca_count)
+               return -EOPNOTSUPP;
+
+       return smu_funcs->get_valid_aca_count(adev, type, count);
+}
+
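+/* register name/index pairs used when dumping a bank to dmesg and debugfs */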
+static struct aca_regs_dump {
+       const char *name;
+       int reg_idx;
+} aca_regs[] = {
+       {"CONTROL",             ACA_REG_IDX_CTL},
+       {"STATUS",              ACA_REG_IDX_STATUS},
+       {"ADDR",                ACA_REG_IDX_ADDR},
+       {"MISC",                ACA_REG_IDX_MISC0},
+       {"CONFIG",              ACA_REG_IDX_CONFG},
+       {"IPID",                ACA_REG_IDX_IPID},
+       {"SYND",                ACA_REG_IDX_SYND},
+       {"DESTAT",              ACA_REG_IDX_DESTAT},
+       {"DEADDR",              ACA_REG_IDX_DEADDR},
+       {"CONTROL_MASK",        ACA_REG_IDX_CTL_MASK},
+};
+
+static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank)
+{
+       int i;
+
+       dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+       /* idx + 1 to print 1-based, e.g.: ACA[08/08]: xxxx */
+       for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+               dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+                        idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type,
+                                      int start, int count,
+                                      struct aca_banks *banks)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+       const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+       struct aca_bank bank;
+       int i, max_count, ret;
+
+       if (!count)
+               return 0;
+
+       if (!smu_funcs || !smu_funcs->get_valid_aca_bank)
+               return -EOPNOTSUPP;
+
+       switch (type) {
+       case ACA_ERROR_TYPE_UE:
+               max_count = smu_funcs->max_ue_bank_count;
+               break;
+       case ACA_ERROR_TYPE_CE:
+               max_count = smu_funcs->max_ce_bank_count;
+               break;
+       case ACA_ERROR_TYPE_DEFERRED:
+       default:
+               return -EINVAL;
+       }
+
+       if (start + count >= max_count)
+               return -EINVAL;
+
+       count = min_t(int, count, max_count);
+       for (i = 0; i < count; i++) {
+               memset(&bank, 0, sizeof(bank));
+               ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank);
+               if (ret)
+                       return ret;
+
+               aca_smu_bank_dump(adev, i, count, &bank);
+
+               ret = aca_banks_add_bank(banks, &bank);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+       struct aca_hwip *hwip;
+       int hwid, mcatype;
+       u64 ipid;
+
+       if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+               return false;
+
+       hwip = &aca_hwid_mcatypes[type];
+       if (!hwip->hwid)
+               return false;
+
+       ipid = bank->regs[ACA_REG_IDX_IPID];
+       hwid = ACA_REG__IPID__HARDWAREID(ipid);
+       mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+       return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type)
+{
+       const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+       if (!aca_bank_hwip_is_matched(bank, handle->hwip))
+               return false;
+
+       if (!bank_ops->aca_bank_is_valid)
+               return true;
+
+       return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data);
+}
+
+static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+       struct aca_bank_error *bank_error;
+
+       bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL);
+       if (!bank_error)
+               return NULL;
+
+       INIT_LIST_HEAD(&bank_error->node);
+       memcpy(&bank_error->info, info, sizeof(*info));
+
+       mutex_lock(&aerr->lock);
+       list_add_tail(&bank_error->node, &aerr->list);
+       mutex_unlock(&aerr->lock);
+
+       return bank_error;
+}
+
+static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+       struct aca_bank_error *bank_error = NULL;
+       struct aca_bank_info *tmp_info;
+       bool found = false;
+
+       mutex_lock(&aerr->lock);
+       list_for_each_entry(bank_error, &aerr->list, node) {
+               tmp_info = &bank_error->info;
+               if (tmp_info->socket_id == info->socket_id &&
+                   tmp_info->die_id == info->die_id) {
+                       found = true;
+                       goto out_unlock;
+               }
+       }
+
+out_unlock:
+       mutex_unlock(&aerr->lock);
+
+       return found ? bank_error : NULL;
+}
+
+static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error)
+{
+       if (!aerr || !bank_error)
+               return;
+
+       list_del(&bank_error->node);
+       aerr->nr_errors--;
+
+       kvfree(bank_error);
+}
+
+static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+       struct aca_bank_error *bank_error;
+
+       if (!aerr || !info)
+               return NULL;
+
+       bank_error = find_bank_error(aerr, info);
+       if (bank_error)
+               return bank_error;
+
+       return new_bank_error(aerr, info);
+}
+
+static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type,
+                         struct aca_bank_report *report)
+{
+       struct aca_error_cache *error_cache;
+       struct aca_bank_error *bank_error;
+       struct aca_error *aerr;
+
+       /* check the arguments before touching the handle */
+       if (!handle || !report)
+               return -EINVAL;
+
+       if (!report->count[type])
+               return 0;
+
+       error_cache = &handle->error_cache;
+       aerr = &error_cache->errors[type];
+       bank_error = get_bank_error(aerr, &report->info);
+       if (!bank_error)
+               return -ENOMEM;
+
+       bank_error->count[type] += report->count[type];
+
+       return 0;
+}
+
+static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank,
+                                   enum aca_error_type type, struct aca_bank_report *report)
+{
+       const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+       if (!bank || !report)
+               return -EINVAL;
+
+       if (!bank_ops->aca_bank_generate_report)
+               return -EOPNOTSUPP;
+
+       memset(report, 0, sizeof(*report));
+       return bank_ops->aca_bank_generate_report(handle, bank, type,
+                                                 report, handle->data);
+}
+
+static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
+                                     enum aca_error_type type, void *data)
+{
+       struct aca_bank_report report;
+       int ret;
+
+       ret = aca_generate_bank_report(handle, bank, type, &report);
+       if (ret)
+               return ret;
+
+       if (!report.count[type])
+               return 0;
+
+       ret = aca_log_errors(handle, type, &report);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
+                            enum aca_error_type type, bank_handler_t handler, void *data)
+{
+       struct aca_handle *handle;
+       int ret;
+
+       if (list_empty(&mgr->list))
+               return 0;
+
+       list_for_each_entry(handle, &mgr->list, node) {
+               if (!aca_bank_is_valid(handle, bank, type))
+                       continue;
+
+               ret = handler(handle, bank, type, data);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
+                             enum aca_error_type type, bank_handler_t handler, void *data)
+{
+       struct aca_bank_node *node;
+       struct aca_bank *bank;
+       int ret;
+
+       if (!mgr || !banks)
+               return -EINVAL;
+
+       /* pre check to avoid unnecessary operations */
+       if (list_empty(&mgr->list) || list_empty(&banks->list))
+               return 0;
+
+       list_for_each_entry(node, &banks->list, node) {
+               bank = &node->bank;
+
+               ret = aca_dispatch_bank(mgr, bank, type, handler, data);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
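+/*
+ * Query the SMU for the number of valid banks of @type, fetch each bank's
+ * registers, and run @handler on every registered handle that matches.
+ */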
+static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type,
+                           bank_handler_t handler, void *data)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+       struct aca_banks banks;
+       u32 count = 0;
+       int ret;
+
+       if (list_empty(&aca->mgr.list))
+               return 0;
+
+       /* NOTE: pmfw only supports UE and CE */
+       if (type == ACA_ERROR_TYPE_DEFERRED)
+               type = ACA_ERROR_TYPE_CE;
+
+       ret = aca_smu_get_valid_aca_count(adev, type, &count);
+       if (ret)
+               return ret;
+
+       if (!count)
+               return 0;
+
+       aca_banks_init(&banks);
+
+       ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks);
+       if (ret)
+               goto err_release_banks;
+
+       if (list_empty(&banks.list)) {
+               ret = 0;
+               goto err_release_banks;
+       }
+
+       ret = aca_dispatch_banks(&aca->mgr, &banks, type,
+                                handler, data);
+       if (ret)
+               goto err_release_banks;
+
+err_release_banks:
+       aca_banks_release(&banks);
+
+       return ret;
+}
+
+static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data)
+{
+       struct aca_bank_info *info;
+       struct amdgpu_smuio_mcm_config_info mcm_info;
+       u64 count;
+
+       if (type >= ACA_ERROR_TYPE_COUNT)
+               return -EINVAL;
+
+       count = bank_error->count[type];
+       if (!count)
+               return 0;
+
+       info = &bank_error->info;
+       mcm_info.die_id = info->die_id;
+       mcm_info.socket_id = info->socket_id;
+
+       switch (type) {
+       case ACA_ERROR_TYPE_UE:
+               amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, count);
+               break;
+       case ACA_ERROR_TYPE_CE:
+               amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count);
+               break;
+       case ACA_ERROR_TYPE_DEFERRED:
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data)
+{
+       struct aca_error_cache *error_cache = &handle->error_cache;
+       struct aca_error *aerr = &error_cache->errors[type];
+       struct aca_bank_error *bank_error, *tmp;
+
+       mutex_lock(&aerr->lock);
+
+       if (list_empty(&aerr->list))
+               goto out_unlock;
+
+       list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) {
+               aca_log_aca_error_data(bank_error, type, err_data);
+               aca_bank_error_remove(aerr, bank_error);
+       }
+
+out_unlock:
+       mutex_unlock(&aerr->lock);
+
+       return 0;
+}
+
+static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
+                               struct ras_err_data *err_data)
+{
+       int ret;
+
+       /* update aca banks into the aca source error_cache first */
+       ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL);
+       if (ret)
+               return ret;
+
+       return aca_log_aca_error(handle, type, err_data);
+}
+
+static bool aca_handle_is_valid(struct aca_handle *handle)
+{
+       if (!handle->mask || !list_empty(&handle->node))
+               return false;
+
+       return true;
+}
+
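+/* RAS entry point: refresh the cache from the SMU banks, then log the
+ * counts for @type into the caller's ras_err_data.
+ */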
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+                             enum aca_error_type type, void *data)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)data;
+
+       if (!handle || !err_data)
+               return -EINVAL;
+
+       if (aca_handle_is_valid(handle))
+               return -EOPNOTSUPP;
+
+       if (!(BIT(type) & handle->mask))
+               return 0;
+
+       return __aca_get_error_data(adev, handle, type, err_data);
+}
+
+static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
+{
+       mutex_init(&aerr->lock);
+       INIT_LIST_HEAD(&aerr->list);
+       aerr->type = type;
+       aerr->nr_errors = 0;
+}
+
+static void aca_init_error_cache(struct aca_handle *handle)
+{
+       struct aca_error_cache *error_cache = &handle->error_cache;
+       int type;
+
+       for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+               aca_error_init(&error_cache->errors[type], type);
+}
+
+static void aca_error_fini(struct aca_error *aerr)
+{
+       struct aca_bank_error *bank_error, *tmp;
+
+       mutex_lock(&aerr->lock);
+       list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
+               aca_bank_error_remove(aerr, bank_error);
+
+       mutex_destroy(&aerr->lock);
+}
+
+static void aca_fini_error_cache(struct aca_handle *handle)
+{
+       struct aca_error_cache *error_cache = &handle->error_cache;
+       int type;
+
+       for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+               aca_error_fini(&error_cache->errors[type]);
+}
+
+static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle,
+                         const char *name, const struct aca_info *ras_info, void *data)
+{
+       memset(handle, 0, sizeof(*handle));
+
+       handle->adev = adev;
+       handle->mgr = mgr;
+       handle->name = name;
+       handle->hwip = ras_info->hwip;
+       handle->mask = ras_info->mask;
+       handle->bank_ops = ras_info->bank_ops;
+       handle->data = data;
+       aca_init_error_cache(handle);
+
+       INIT_LIST_HEAD(&handle->node);
+       list_add_tail(&handle->node, &mgr->list);
+       mgr->nr_handles++;
+
+       return 0;
+}
+
+static ssize_t aca_sysfs_read(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr);
+
+       /* NOTE: the aca cache will be auto cleared once read,
+        * so the driver should unify the query entry point and forward
+        * requests to the ras query interface directly.
+        */
+       return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data);
+}
+
+static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle)
+{
+       struct device_attribute *aca_attr = &handle->aca_attr;
+
+       snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name);
+       aca_attr->show = aca_sysfs_read;
+       aca_attr->attr.name = handle->attr_name;
+       aca_attr->attr.mode = S_IRUGO;
+       sysfs_attr_init(&aca_attr->attr);
+
+       return sysfs_add_file_to_group(&adev->dev->kobj,
+                                      &aca_attr->attr,
+                                      "ras");
+}
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+                         const char *name, const struct aca_info *ras_info, void *data)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+       int ret;
+
+       if (!amdgpu_aca_is_enabled(adev))
+               return 0;
+
+       ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data);
+       if (ret)
+               return ret;
+
+       return add_aca_sysfs(adev, handle);
+}
+
+static void remove_aca_handle(struct aca_handle *handle)
+{
+       struct aca_handle_manager *mgr = handle->mgr;
+
+       aca_fini_error_cache(handle);
+       list_del(&handle->node);
+       mgr->nr_handles--;
+}
+
+static void remove_aca_sysfs(struct aca_handle *handle)
+{
+       struct amdgpu_device *adev = handle->adev;
+       struct device_attribute *aca_attr = &handle->aca_attr;
+
+       if (adev->dev->kobj.sd)
+               sysfs_remove_file_from_group(&adev->dev->kobj,
+                                            &aca_attr->attr,
+                                            "ras");
+}
+
+void amdgpu_aca_remove_handle(struct aca_handle *handle)
+{
+       if (!handle || list_empty(&handle->node))
+               return;
+
+       remove_aca_sysfs(handle);
+       remove_aca_handle(handle);
+}
+
+static int aca_manager_init(struct aca_handle_manager *mgr)
+{
+       INIT_LIST_HEAD(&mgr->list);
+       mgr->nr_handles = 0;
+
+       return 0;
+}
+
+static void aca_manager_fini(struct aca_handle_manager *mgr)
+{
+       struct aca_handle *handle, *tmp;
+
+       list_for_each_entry_safe(handle, tmp, &mgr->list, node)
+               amdgpu_aca_remove_handle(handle);
+}
+
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev)
+{
+       return adev->aca.is_enabled;
+}
+
+int amdgpu_aca_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+       int ret;
+
+       ret = aca_manager_init(&aca->mgr);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+void amdgpu_aca_fini(struct amdgpu_device *adev)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+
+       aca_manager_fini(&aca->mgr);
+}
+
+int amdgpu_aca_reset(struct amdgpu_device *adev)
+{
+       amdgpu_aca_fini(adev);
+
+       return amdgpu_aca_init(adev);
+}
+
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+
+       WARN_ON(aca->smu_funcs);
+       aca->smu_funcs = smu_funcs;
+}
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
+{
+       u64 ipid;
+       u32 instidhi, instidlo;
+
+       if (!bank || !info)
+               return -EINVAL;
+
+       ipid = bank->regs[ACA_REG_IDX_IPID];
+       info->hwid = ACA_REG__IPID__HARDWAREID(ipid);
+       info->mcatype = ACA_REG__IPID__MCATYPE(ipid);
+       /*
+        * Unified DieID Format: SAASS. A:AID, S:Socket.
+        * Unified DieID[4:4] = InstanceId[0:0]
+        * Unified DieID[0:3] = InstanceIdHi[0:3]
+        */
+       instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid);
+       instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid);
+       info->die_id = ((instidhi >> 2) & 0x03);
+       info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
+
+       return 0;
+}
+
+static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
+{
+       int error_code;
+
+       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+       case IP_VERSION(13, 0, 6):
+               if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) {
+                       error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
+                       return error_code & 0xff;
+               }
+               break;
+       default:
+               break;
+       }
+
+       /* NOTE: the true error code is encoded in status.errorcode[0:7] */
+       error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
+
+       return error_code & 0xff;
+}
+
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
+{
+       int i, error_code;
+
+       if (!bank || !err_codes)
+               return -EINVAL;
+
+       error_code = aca_bank_get_error_code(adev, bank);
+       for (i = 0; i < size; i++) {
+               if (err_codes[i] == error_code)
+                       return 0;
+       }
+
+       return -EINVAL;
+}
+
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en)
+{
+       struct amdgpu_aca *aca = &adev->aca;
+       const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+       if (!smu_funcs || !smu_funcs->set_debug_mode)
+               return -EOPNOTSUPP;
+
+       return smu_funcs->set_debug_mode(adev, en);
+}
+
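+/* debugfs interface: aca_debug_mode (write) and aca_{ue,ce}_dump (read) */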
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)data;
+       int ret;
+
+       ret = amdgpu_ras_set_aca_debug_mode(adev, val ? true : false);
+       if (ret)
+               return ret;
+
+       dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off");
+
+       return 0;
+}
+
+static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx)
+{
+       struct aca_bank_info info;
+       int i, ret;
+
+       ret = aca_bank_info_decode(bank, &info);
+       if (ret)
+               return;
+
+       seq_printf(m, "aca entry[%d].type: %s\n", idx, type ==  ACA_ERROR_TYPE_UE ? "UE" : "CE");
+       seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
+                  idx, info.socket_id, info.die_id, info.hwid, info.mcatype);
+
+       for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+               seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]);
+}
+
+struct aca_dump_context {
+       struct seq_file *m;
+       int idx;
+};
+
+static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
+                                enum aca_error_type type, void *data)
+{
+       struct aca_dump_context *ctx = (struct aca_dump_context *)data;
+
+       aca_dump_entry(ctx->m, bank, type, ctx->idx++);
+
+       return handler_aca_log_bank_error(handle, bank, type, NULL);
+}
+
+static int aca_dump_show(struct seq_file *m, enum aca_error_type type)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct aca_dump_context context = {
+               .m = m,
+               .idx = 0,
+       };
+
+       return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context);
+}
+
+static int aca_dump_ce_show(struct seq_file *m, void *unused)
+{
+       return aca_dump_show(m, ACA_ERROR_TYPE_CE);
+}
+
+static int aca_dump_ce_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, aca_dump_ce_show, inode->i_private);
+}
+
+static const struct file_operations aca_ce_dump_debug_fops = {
+       .owner = THIS_MODULE,
+       .open = aca_dump_ce_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int aca_dump_ue_show(struct seq_file *m, void *unused)
+{
+       return aca_dump_show(m, ACA_ERROR_TYPE_UE);
+}
+
+static int aca_dump_ue_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, aca_dump_ue_show, inode->i_private);
+}
+
+static const struct file_operations aca_ue_dump_debug_fops = {
+       .owner = THIS_MODULE,
+       .open = aca_dump_ue_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n");
+#endif
+
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
+{
+#if defined(CONFIG_DEBUG_FS)
+       if (!root || adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 6))
+               return;
+
+       debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
+       debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops);
+       debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
new file mode 100644 (file)
index 0000000..2da50e0
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_ACA_H__
+#define __AMDGPU_ACA_H__
+
+#include <linux/list.h>
+
+#define ACA_MAX_REGS_COUNT     (16)
+
+#define ACA_REG_FIELD(x, h, l)                 (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG__STATUS__VAL(x)                        ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__STATUS__OVERFLOW(x)           ACA_REG_FIELD(x, 62, 62)
+#define ACA_REG__STATUS__UC(x)                 ACA_REG_FIELD(x, 61, 61)
+#define ACA_REG__STATUS__EN(x)                 ACA_REG_FIELD(x, 60, 60)
+#define ACA_REG__STATUS__MISCV(x)              ACA_REG_FIELD(x, 59, 59)
+#define ACA_REG__STATUS__ADDRV(x)              ACA_REG_FIELD(x, 58, 58)
+#define ACA_REG__STATUS__PCC(x)                        ACA_REG_FIELD(x, 57, 57)
+#define ACA_REG__STATUS__ERRCOREIDVAL(x)       ACA_REG_FIELD(x, 56, 56)
+#define ACA_REG__STATUS__TCC(x)                        ACA_REG_FIELD(x, 55, 55)
+#define ACA_REG__STATUS__SYNDV(x)              ACA_REG_FIELD(x, 53, 53)
+#define ACA_REG__STATUS__CECC(x)               ACA_REG_FIELD(x, 46, 46)
+#define ACA_REG__STATUS__UECC(x)               ACA_REG_FIELD(x, 45, 45)
+#define ACA_REG__STATUS__DEFERRED(x)           ACA_REG_FIELD(x, 44, 44)
+#define ACA_REG__STATUS__POISON(x)             ACA_REG_FIELD(x, 43, 43)
+#define ACA_REG__STATUS__SCRUB(x)              ACA_REG_FIELD(x, 40, 40)
+#define ACA_REG__STATUS__ERRCOREID(x)          ACA_REG_FIELD(x, 37, 32)
+#define ACA_REG__STATUS__ADDRLSB(x)            ACA_REG_FIELD(x, 29, 24)
+#define ACA_REG__STATUS__ERRORCODEEXT(x)       ACA_REG_FIELD(x, 21, 16)
+#define ACA_REG__STATUS__ERRORCODE(x)          ACA_REG_FIELD(x, 15, 0)
+
+#define ACA_REG__IPID__MCATYPE(x)              ACA_REG_FIELD(x, 63, 48)
+#define ACA_REG__IPID__INSTANCEIDHI(x)         ACA_REG_FIELD(x, 47, 44)
+#define ACA_REG__IPID__HARDWAREID(x)           ACA_REG_FIELD(x, 43, 32)
+#define ACA_REG__IPID__INSTANCEIDLO(x)         ACA_REG_FIELD(x, 31, 0)
+
+#define ACA_REG__MISC0__VALID(x)               ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__MISC0__OVRFLW(x)              ACA_REG_FIELD(x, 48, 48)
+#define ACA_REG__MISC0__ERRCNT(x)              ACA_REG_FIELD(x, 43, 32)
+
+#define ACA_REG__SYND__ERRORINFORMATION(x)     ACA_REG_FIELD(x, 17, 0)
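+
+/*
+ * Field extraction example (illustrative value only): for a STATUS
+ * register of 0x9000000000001234ULL, ACA_REG__STATUS__VAL() yields 1
+ * (bit 63) and ACA_REG__STATUS__ERRORCODE() yields 0x1234 (bits 15:0).
+ */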
+
+/* NOTE: the following error codes refer to the SMU header file */
+#define ACA_EXTERROR_CODE_CE                   0x3a
+#define ACA_EXTERROR_CODE_FAULT                        0x3b
+
+#define ACA_ERROR_UE_MASK              BIT_MASK(ACA_ERROR_TYPE_UE)
+#define ACA_ERROR_CE_MASK              BIT_MASK(ACA_ERROR_TYPE_CE)
+#define ACA_ERROR_DEFERRED_MASK                BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+
+enum aca_reg_idx {
+       ACA_REG_IDX_CTL                 = 0,
+       ACA_REG_IDX_STATUS              = 1,
+       ACA_REG_IDX_ADDR                = 2,
+       ACA_REG_IDX_MISC0               = 3,
+       ACA_REG_IDX_CONFG               = 4,
+       ACA_REG_IDX_IPID                = 5,
+       ACA_REG_IDX_SYND                = 6,
+       ACA_REG_IDX_DESTAT              = 8,
+       ACA_REG_IDX_DEADDR              = 9,
+       ACA_REG_IDX_CTL_MASK            = 10,
+       ACA_REG_IDX_COUNT               = 16,
+};
+
+enum aca_hwip_type {
+       ACA_HWIP_TYPE_UNKNOW = -1,
+       ACA_HWIP_TYPE_PSP = 0,
+       ACA_HWIP_TYPE_UMC,
+       ACA_HWIP_TYPE_SMU,
+       ACA_HWIP_TYPE_PCS_XGMI,
+       ACA_HWIP_TYPE_COUNT,
+};
+
+enum aca_error_type {
+       ACA_ERROR_TYPE_INVALID = -1,
+       ACA_ERROR_TYPE_UE = 0,
+       ACA_ERROR_TYPE_CE,
+       ACA_ERROR_TYPE_DEFERRED,
+       ACA_ERROR_TYPE_COUNT
+};
+
+struct aca_bank {
+       u64 regs[ACA_MAX_REGS_COUNT];
+};
+
+struct aca_bank_node {
+       struct aca_bank bank;
+       struct list_head node;
+};
+
+struct aca_bank_info {
+       int die_id;
+       int socket_id;
+       int hwid;
+       int mcatype;
+};
+
+struct aca_bank_report {
+       struct aca_bank_info info;
+       u64 count[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_bank_error {
+       struct list_head node;
+       struct aca_bank_info info;
+       u64 count[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_error {
+       struct list_head list;
+       struct mutex lock;
+       enum aca_error_type type;
+       int nr_errors;
+};
+
+struct aca_handle_manager {
+       struct list_head list;
+       int nr_handles;
+};
+
+struct aca_error_cache {
+       struct aca_error errors[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_handle {
+       struct list_head node;
+       enum aca_hwip_type hwip;
+       struct amdgpu_device *adev;
+       struct aca_handle_manager *mgr;
+       struct aca_error_cache error_cache;
+       const struct aca_bank_ops *bank_ops;
+       struct device_attribute aca_attr;
+       char attr_name[64];
+       const char *name;
+       u32 mask;
+       void *data;
+};
+
+struct aca_bank_ops {
+       int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+                                       struct aca_bank_report *report, void *data);
+       bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+                                 void *data);
+};
+
+struct aca_smu_funcs {
+       int max_ue_bank_count;
+       int max_ce_bank_count;
+       int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
+       int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count);
+       int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank);
+};
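+
+/*
+ * Registration sketch (the ops-table name below is hypothetical): an SMU
+ * driver is expected to provide a static ops table once during init, e.g.
+ *
+ *     static const struct aca_smu_funcs smu_aca_funcs = { ... };
+ *     amdgpu_aca_set_smu_funcs(adev, &smu_aca_funcs);
+ */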
+
+struct amdgpu_aca {
+       struct aca_handle_manager mgr;
+       const struct aca_smu_funcs *smu_funcs;
+       bool is_enabled;
+};
+
+struct aca_info {
+       enum aca_hwip_type hwip;
+       const struct aca_bank_ops *bank_ops;
+       u32 mask;
+};
+
+int amdgpu_aca_init(struct amdgpu_device *adev);
+void amdgpu_aca_fini(struct amdgpu_device *adev);
+int amdgpu_aca_reset(struct amdgpu_device *adev);
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs);
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev);
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info);
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size);
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+                         const char *name, const struct aca_info *aca_info, void *data);
+void amdgpu_aca_remove_handle(struct aca_handle *handle);
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+                                    enum aca_error_type type, void *data);
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en);
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+#endif
index 41db030ddc4ee9c98ba952b4b91d6292f7c457d6..190039f14c30c676f080e6c55a8c8637901a74fd 100644 (file)
@@ -742,9 +742,10 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
        amdgpu_device_flush_hdp(adev, NULL);
 }
 
-void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset)
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+       enum amdgpu_ras_block block, bool reset)
 {
-       amdgpu_umc_poison_handler(adev, reset);
+       amdgpu_umc_poison_handler(adev, block, reset);
 }
 
 int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
index 27c61c535e297931892902f1abb9e56ca6feea5c..e60f63ccf79a25a3aa01a987509a90988849f542 100644 (file)
@@ -193,6 +193,9 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
                                unsigned long cur_seq, struct kgd_mem *mem);
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+                                       uint32_t domain,
+                                       struct dma_fence *fence);
 #else
 static inline
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
@@ -218,6 +221,13 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
 {
        return 0;
 }
+static inline
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+                                       uint32_t domain,
+                                       struct dma_fence *fence)
+{
+       return 0;
+}
 #endif
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
@@ -326,7 +336,7 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
                                struct tile_config *config);
 void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
-                               bool reset);
+                       enum amdgpu_ras_block block, bool reset);
 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
 void amdgpu_amdkfd_block_mmu_notifications(void *p);
 int amdgpu_amdkfd_criu_resume(void *p);
index 231fd927dcfbee0db07e3a5d28eed2b24ff82b9c..5cd84f72bf26ef5a6896f7ea853c654b55e8569d 100644 (file)
@@ -426,9 +426,9 @@ validate_fail:
        return ret;
 }
 
-static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
-                                              uint32_t domain,
-                                              struct dma_fence *fence)
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+                                       uint32_t domain,
+                                       struct dma_fence *fence)
 {
        int ret = amdgpu_bo_reserve(bo, false);
 
@@ -464,13 +464,15 @@ static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
  * again. Page directories are only updated after updating page
  * tables.
  */
-static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
+static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
+                                struct ww_acquire_ctx *ticket)
 {
        struct amdgpu_bo *pd = vm->root.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        int ret;
 
-       ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
+       ret = amdgpu_vm_validate(adev, vm, ticket,
+                                amdgpu_amdkfd_validate_vm_bo, NULL);
        if (ret) {
                pr_err("failed to validate PT BOs\n");
                return ret;
@@ -1310,14 +1312,15 @@ update_gpuvm_pte_failed:
        return ret;
 }
 
-static int process_validate_vms(struct amdkfd_process_info *process_info)
+static int process_validate_vms(struct amdkfd_process_info *process_info,
+                               struct ww_acquire_ctx *ticket)
 {
        struct amdgpu_vm *peer_vm;
        int ret;
 
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
-               ret = vm_validate_pt_pd_bos(peer_vm);
+               ret = vm_validate_pt_pd_bos(peer_vm, ticket);
                if (ret)
                        return ret;
        }
@@ -1402,7 +1405,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
        ret = amdgpu_bo_reserve(vm->root.bo, true);
        if (ret)
                goto reserve_pd_fail;
-       ret = vm_validate_pt_pd_bos(vm);
+       ret = vm_validate_pt_pd_bos(vm, NULL);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;
@@ -2043,7 +2046,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
            bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
                is_invalid_userptr = true;
 
-       ret = vm_validate_pt_pd_bos(avm);
+       ret = vm_validate_pt_pd_bos(avm, NULL);
        if (unlikely(ret))
                goto out_unreserve;
 
@@ -2136,7 +2139,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                goto unreserve_out;
        }
 
-       ret = vm_validate_pt_pd_bos(avm);
+       ret = vm_validate_pt_pd_bos(avm, NULL);
        if (unlikely(ret))
                goto unreserve_out;
 
@@ -2634,7 +2637,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                }
        }
 
-       ret = process_validate_vms(process_info);
+       ret = process_validate_vms(process_info, NULL);
        if (ret)
                goto unreserve_out;
 
@@ -2894,11 +2897,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 
        amdgpu_sync_create(&sync_obj);
 
-       /* Validate PDs and PTs */
-       ret = process_validate_vms(process_info);
-       if (ret)
-               goto validate_map_fail;
-
        /* Validate BOs and map them to GPUVM (update VM page tables). */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list) {
@@ -2949,6 +2947,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
        if (failed_size)
                pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
 
+       /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+        * validations above would invalidate DMABuf imports again.
+        */
+       ret = process_validate_vms(process_info, &exec.ticket);
+       if (ret)
+               goto validate_map_fail;
+
        /* Update mappings not managed by KFD */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                        vm_list_node) {
@@ -3020,7 +3025,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
                                   &process_info->eviction_fence->base,
                                   DMA_RESV_USAGE_BOOKKEEP);
        }
-       /* Attach eviction fence to PD / PT BOs */
+       /* Attach eviction fence to PD / PT BOs and DMABuf imports */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                struct amdgpu_bo *bo = peer_vm->root.bo;
index dce9e7d5e4ec672827f574fb64816ca205ef96ee..52b12c1718eb0ecd38d5563306c959ae675f3438 100644 (file)
@@ -1018,7 +1018,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
                        args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
 
-                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                               sizeof(args));
 
                        dividers->post_div = args.v3.ucPostDiv;
                        dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1038,7 +1039,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                        if (strobe_mode)
                                args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
 
-                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                               sizeof(args));
 
                        dividers->post_div = args.v5.ucPostDiv;
                        dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1056,7 +1058,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                /* fusion */
                args.v4.ulClock = cpu_to_le32(clock);   /* 10 khz */
 
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                       sizeof(args));
 
                dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
                dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1067,7 +1070,8 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                args.v6_in.ulClock.ulComputeClockFlag = clock_type;
                args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);    /* 10 khz */
 
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                       sizeof(args));
 
                dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
                dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1109,7 +1113,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
                        if (strobe_mode)
                                args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
 
-                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                               sizeof(args));
 
                        mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
                        mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1151,7 +1156,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
        if (mem_clock)
                args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+               sizeof(args));
 }
 
 void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
@@ -1205,7 +1211,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
                args.v2.ucVoltageMode = 0;
                args.v2.usVoltageLevel = 0;
 
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                       sizeof(args));
 
                *voltage = le16_to_cpu(args.v2.usVoltageLevel);
                break;
@@ -1214,7 +1221,8 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
                args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
                args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
 
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
+                       sizeof(args));
 
                *voltage = le16_to_cpu(args.v3.usVoltageLevel);
                break;
index fb2681dd6b338c222eaa0431cce61f940ad239b2..6857c586ded710e62d6b5a15d77966b719c05c6f 100644 (file)
@@ -941,5 +941,6 @@ int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
                return -EINVAL;
        }
 
-       return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1);
+       return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
+               sizeof(asic_init_ps_v2_1));
 }
index c7eb2caec65a92491b66fe98178199bfe0e7c6f8..649b5530d8ae13905b32937c5a36ce8b5e671d35 100644 (file)
@@ -36,7 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_ti2c_address);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address);
 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
index 6adeddfb3d5643bebe7366094ce8f4ba00dcdfb2..0a4b09709cfb149078c6284f2a0908cbde928430 100644 (file)
@@ -952,10 +952,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        p->bytes_moved = 0;
        p->bytes_moved_vis = 0;
 
-       r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
-                                     amdgpu_cs_bo_validate, p);
+       r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
+                              amdgpu_cs_bo_validate, p);
        if (r) {
-               DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
+               DRM_ERROR("amdgpu_vm_validate() failed.\n");
                goto out_free_user_pages;
        }
 
index 796fa6f1420b339c00ee17115765147c6714bbb8..b5ad56690a9d68f63e585ae0bb82522848ac6b9b 100644 (file)
@@ -30,7 +30,7 @@ uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
 {
        uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
 
-       addr -= AMDGPU_VA_RESERVED_SIZE;
+       addr -= AMDGPU_VA_RESERVED_CSA_SIZE;
        addr = amdgpu_gmc_sign_extend(addr);
 
        return addr;
index fdde7488d0ed9a8ff93f4a4cc58c123b904236c7..d534e192e260d7e5cc92f68e05e70b79e9e7f8ac 100644 (file)
@@ -96,6 +96,9 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 #define AMDGPU_RESUME_MS               2000
 #define AMDGPU_MAX_RETRY_LIMIT         2
 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
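+/*
+ * Fallback PCIE index/data registers, used before the nbio callbacks are
+ * set up; byte offsets are converted to dword indices into the rmmio
+ * aperture (which is accessed in 4-byte units).
+ */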
+#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
+#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
+#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
 
 static const struct drm_driver amdgpu_kms_driver;
 
@@ -781,12 +784,22 @@ u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
        void __iomem *pcie_index_hi_offset;
        void __iomem *pcie_data_offset;
 
-       pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
-       pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
-       if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
-               pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
-       else
+       if (unlikely(!adev->nbio.funcs)) {
+               pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
+               pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
+       } else {
+               pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+               pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+       }
+
+       if (reg_addr >> 32) {
+               if (unlikely(!adev->nbio.funcs))
+                       pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
+               else
+                       pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+       } else {
                pcie_index_hi = 0;
+       }
 
        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -1218,8 +1231,6 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
            amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
                amdgpu_psp_wait_for_bootloader(adev);
                ret = amdgpu_atomfirmware_asic_init(adev, true);
-               /* TODO: check the return val and stop device initialization if boot fails */
-               amdgpu_psp_query_boot_status(adev);
                return ret;
        } else {
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1442,6 +1453,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
+       /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
+       if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
+               DRM_WARN("System can't access extended configuration space, please check!\n");
+
        /* skip if the bios has already enabled large BAR */
        if (adev->gmc.real_vram_size &&
            (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
@@ -5680,6 +5695,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                /* Aldebaran and gfx_11_0_3 support ras in SRIOV, so need resume ras during reset */
                if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
                            IP_VERSION(9, 4, 2) ||
+                   amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
                    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
                        amdgpu_ras_resume(adev);
        } else {
@@ -6101,6 +6117,20 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
        struct amdgpu_reset_context reset_context;
        u32 memsize;
        struct list_head device_list;
+       struct amdgpu_hive_info *hive;
+       int hive_ras_recovery = 0;
+       struct amdgpu_ras *ras;
+
+       /* PCI error slot reset should be skipped during RAS recovery */
+       hive = amdgpu_get_xgmi_hive(adev);
+       if (hive) {
+               hive_ras_recovery = atomic_read(&hive->ras_recovery);
+               amdgpu_put_xgmi_hive(hive);
+       }
+       ras = amdgpu_ras_get_context(adev);
+       if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
+                ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+               return PCI_ERS_RESULT_RECOVERED;
 
        DRM_INFO("PCI error: slot reset callback!!\n");
 
index c7d60dd0fb975d47d749300c79f976da15892736..118288b644870235d66a8ccde39db42e40eda06f 100644 (file)
@@ -27,6 +27,7 @@
 #include "amdgpu_discovery.h"
 #include "soc15_hw_ip.h"
 #include "discovery.h"
+#include "amdgpu_ras.h"
 
 #include "soc15.h"
 #include "gfx_v9_0.h"
@@ -98,6 +99,7 @@
 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
 
+#define mmIP_DISCOVERY_VERSION  0x16A00
 #define mmRCC_CONFIG_MEMSIZE   0xde3
 #define mmMP0_SMN_C2PMSG_33    0x16061
 #define mmMM_INDEX             0x0
@@ -518,7 +520,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
 out:
        kfree(adev->mman.discovery_bin);
        adev->mman.discovery_bin = NULL;
-
+       if ((amdgpu_discovery != 2) &&
+           (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+               amdgpu_ras_query_boot_status(adev, 4);
        return r;
 }
 
@@ -1278,11 +1282,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                 *     0b10 : encode is disabled
                                 *     0b01 : decode is disabled
                                 */
-                               adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
-                                       ip->revision & 0xc0;
-                               ip->revision &= ~0xc0;
                                if (adev->vcn.num_vcn_inst <
                                    AMDGPU_MAX_VCN_INSTANCES) {
+                                       adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+                                               ip->revision & 0xc0;
                                        adev->vcn.num_vcn_inst++;
                                        adev->vcn.inst_mask |=
                                                (1U << ip->instance_number);
@@ -1293,6 +1296,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                                adev->vcn.num_vcn_inst + 1,
                                                AMDGPU_MAX_VCN_INSTANCES);
                                }
+                               ip->revision &= ~0xc0;
                        }
                        if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
                            le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
index decbbe3d4f06e9aa67365b68e747feeb5bad6fc1..055ba2ea4c126f621890b56c56ef86672e63b0c3 100644 (file)
@@ -377,6 +377,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
        struct amdgpu_vm_bo_base *bo_base;
        int r;
 
+       /* FIXME: This should be after the "if", but needs a fix to make sure
+        * DMABuf imports are initialized in the right VM list.
+        */
+       amdgpu_vm_bo_invalidate(adev, bo, false);
        if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
                return;
 
index 971acf01bea6063cfcca6275a21b12662cfbd824..161ecf9b41747924e42b0e3d2f822f0b2ad42b35 100644 (file)
@@ -366,7 +366,7 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
  * Setting the value to 0 disables this functionality.
  * Setting the value to -2 is auto enabled with power down when displays are attached.
  */
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = autowith displays)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
 module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
 
 /**
@@ -593,7 +593,7 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
 #ifdef CONFIG_DRM_AMDGPU_SI
 
 #if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support = 0;
+int amdgpu_si_support;
 MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
 #else
 int amdgpu_si_support = 1;
@@ -612,7 +612,7 @@ module_param_named(si_support, amdgpu_si_support, int, 0444);
 #ifdef CONFIG_DRM_AMDGPU_CIK
 
 #if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support = 0;
+int amdgpu_cik_support;
 MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
 #else
 int amdgpu_cik_support = 1;
@@ -2476,6 +2476,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+       adev->suspend_complete = false;
        if (amdgpu_acpi_is_s0ix_active(adev))
                adev->in_s0ix = true;
        else if (amdgpu_acpi_is_s3_active(adev))
@@ -2490,6 +2491,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
+       adev->suspend_complete = true;
        if (amdgpu_acpi_should_gpu_reset(adev))
                return amdgpu_asic_reset(adev);
 
index 49a5f1c73b3ecc98234654dcb452142f3aa7c564..22aeee8adb71bf8d8a076d013db062a287f23674 100644 (file)
@@ -187,7 +187,34 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
        else
                ++bo_va->ref_count;
        amdgpu_bo_unreserve(abo);
-       return 0;
+
+       /* Validate and add eviction fence to DMABuf imports with dynamic
+        * attachment in compute VMs. Re-validation will be done by
+        * amdgpu_vm_validate. Fences are on the reservation shared with the
+        * export, which is currently required to be validated and fenced
+        * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
+        *
+        * Nested locking below for the case that a GEM object is opened in
+        * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
+        * but not for export, this is a different lock class that cannot lead to
+        * circular lock dependencies.
+        */
+       if (!vm->is_compute_context || !vm->process_info)
+               return 0;
+       if (!obj->import_attach ||
+           !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+               return 0;
+       mutex_lock_nested(&vm->process_info->lock, 1);
+       if (!WARN_ON(!vm->process_info->eviction_fence)) {
+               r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
+                                                       &vm->process_info->eviction_fence->base);
+               if (r)
+                       dev_warn(adev->dev, "%d: validate_and_fence failed: %d\n",
+                                vm->task_info.pid, r);
+       }
+       mutex_unlock(&vm->process_info->lock);
+
+       return r;
 }
 
 static void amdgpu_gem_object_close(struct drm_gem_object *obj,
@@ -682,10 +709,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        uint64_t vm_size;
        int r = 0;
 
-       if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
+       if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
                dev_dbg(dev->dev,
                        "va_address 0x%llx is in reserved area 0x%llx\n",
-                       args->va_address, AMDGPU_VA_RESERVED_SIZE);
+                       args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
                return -EINVAL;
        }
 
@@ -701,7 +728,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        args->va_address &= AMDGPU_GMC_HOLE_MASK;
 
        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
-       vm_size -= AMDGPU_VA_RESERVED_SIZE;
+       vm_size -= AMDGPU_VA_RESERVED_TOP;
        if (args->va_address + args->map_size > vm_size) {
                dev_dbg(dev->dev,
                        "va_address 0x%llx is in top reserved area 0x%llx\n",
index 82b4b2019fca03bcc863037d5c5e7be784ed9580..78229b61c233f6e98ddf4427e4faa44fcfb9e19f 100644 (file)
@@ -643,8 +643,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
        kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                j = i + xcc_id * adev->gfx.num_compute_rings;
-                       kiq->pmf->kiq_map_queues(kiq_ring,
-                                                &adev->gfx.compute_ring[j]);
+               kiq->pmf->kiq_map_queues(kiq_ring,
+                                        &adev->gfx.compute_ring[j]);
        }
 
        r = amdgpu_ring_test_helper(kiq_ring);
index 55784a9f26c4c83b17008a766130c234df8ecbaf..d4a848c51a83cba5cd37188b41a266af9f6835a4 100644 (file)
@@ -52,7 +52,7 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
        struct amdgpu_bo_param bp;
        u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
        uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
-       uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift;
+       uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
 
        memset(&bp, 0, sizeof(bp));
        bp.size = PAGE_ALIGN((npdes + 1) * 8);
@@ -746,6 +746,59 @@ error_unlock_reset:
        return r;
 }
 
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+                                     uint32_t reg0, uint32_t reg1,
+                                     uint32_t ref, uint32_t mask,
+                                     uint32_t xcc_inst)
+{
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
+       struct amdgpu_ring *ring = &kiq->ring;
+       signed long r, cnt = 0;
+       unsigned long flags;
+       uint32_t seq;
+
+       if (adev->mes.ring.sched.ready) {
+               amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
+                                             ref, mask);
+               return;
+       }
+
+       spin_lock_irqsave(&kiq->ring_lock, flags);
+       amdgpu_ring_alloc(ring, 32);
+       amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+                                           ref, mask);
+       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+       if (r)
+               goto failed_undo;
+
+       amdgpu_ring_commit(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+       /* don't wait anymore for IRQ context */
+       if (r < 1 && in_interrupt())
+               goto failed_kiq;
+
+       might_sleep();
+       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+       }
+
+       if (cnt > MAX_KIQ_REG_TRY)
+               goto failed_kiq;
+
+       return;
+
+failed_undo:
+       amdgpu_ring_undo(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq:
+       dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
 /**
  * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
  * @adev: amdgpu_device pointer
index e699d1ca8debd3e1d49de46a844048e67b55f23a..17f40ea1104b00bcfd1596337a2848b9dc5ef210 100644 (file)
@@ -417,6 +417,10 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
                                   uint32_t flush_type, bool all_hub,
                                   uint32_t inst);
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+                                     uint32_t reg0, uint32_t reg1,
+                                     uint32_t ref, uint32_t mask,
+                                     uint32_t xcc_inst);
 
 extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
 extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
index ddd0891da116b9dd9de237643c0707bbf816e751..3d7fcdeaf8cf00c398784bde87a0310fe401aea6 100644 (file)
@@ -62,9 +62,8 @@ int amdgpu_pasid_alloc(unsigned int bits)
        int pasid = -EINVAL;
 
        for (bits = min(bits, 31U); bits > 0; bits--) {
-               pasid = ida_simple_get(&amdgpu_pasid_ida,
-                                      1U << (bits - 1), 1U << bits,
-                                      GFP_KERNEL);
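+               /*
+                * Note: ida_alloc_range() takes an inclusive maximum, hence
+                * (1U << bits) - 1; the 'end' of the removed ida_simple_get()
+                * call was exclusive.
+                */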
+               pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
+                                       (1U << bits) - 1, GFP_KERNEL);
                if (pasid != -ENOSPC)
                        break;
        }
@@ -82,7 +81,7 @@ int amdgpu_pasid_alloc(unsigned int bits)
 void amdgpu_pasid_free(u32 pasid)
 {
        trace_amdgpu_pasid_freed(pasid);
-       ida_simple_remove(&amdgpu_pasid_ida, pasid);
+       ida_free(&amdgpu_pasid_ida, pasid);
 }
 
 static void amdgpu_pasid_free_cb(struct dma_fence *fence,
index bf4f48fe438d1b5936852145c8b4c1059446381c..a2df3025a754b7cc29376b5ff65c3f9c11b8cdb9 100644 (file)
@@ -894,14 +894,14 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
 
                vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
-               vm_size -= AMDGPU_VA_RESERVED_SIZE;
+               vm_size -= AMDGPU_VA_RESERVED_TOP;
 
                /* Older VCE FW versions are buggy and can handle only 40bits */
                if (adev->vce.fw_version &&
                    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
                        vm_size = min(vm_size, 1ULL << 40);
 
-               dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
+               dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM;
                dev_info->virtual_address_max =
                        min(vm_size, AMDGPU_GMC_HOLE_START);
 
@@ -1114,6 +1114,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        }
                        ui32 >>= 8;
                        break;
+               case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER:
+                       /* get input GPU power */
+                       if (amdgpu_dpm_read_sensor(adev,
+                                                  AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+                                                  (void *)&ui32, &ui32_size)) {
+                               return -EINVAL;
+                       }
+                       ui32 >>= 8;
+                       break;
                case AMDGPU_INFO_SENSOR_VDDNB:
                        /* get VDDNB in millivolts */
                        if (amdgpu_dpm_read_sensor(adev,
@@ -1370,6 +1379,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        goto error_vm;
        }
 
+       r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
+       if (r)
+               goto error_vm;
+
        mutex_init(&fpriv->bo_list_lock);
        idr_init_base(&fpriv->bo_list_handles, 1);
 
index 59fafb8392e0bae775e721e166aac800dfbdc98c..24ad4b97177b5cff7840cb434115f61018f09f74 100644 (file)
 #include "umc/umc_6_7_0_offset.h"
 #include "umc/umc_6_7_0_sh_mask.h"
 
+static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
+                                       uint64_t mc_status)
+{
+       if (adev->umc.ras->check_ecc_err_status)
+               return adev->umc.ras->check_ecc_err_status(adev,
+                               AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);
+
+       return false;
+}
+
 void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
                                              uint64_t mc_status_addr,
                                              unsigned long *error_count)
@@ -202,16 +212,16 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
 
 static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
 {
-       dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n");
-       dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n",
+       dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
+       dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_STATUS]);
-       dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n",
+       dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_ADDR]);
-       dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n",
+       dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_MISC0]);
-       dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n",
+       dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_IPID]);
-       dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n",
+       dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_SYND]);
 }
 
@@ -256,9 +266,14 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo
                if (type == AMDGPU_MCA_ERROR_TYPE_UE)
                        amdgpu_ras_error_statistic_ue_count(err_data,
                                &mcm_info, &err_addr, (uint64_t)count);
-               else
-                       amdgpu_ras_error_statistic_ce_count(err_data,
-                               &mcm_info, &err_addr, (uint64_t)count);
+               else {
+                       if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
+                               amdgpu_ras_error_statistic_de_count(err_data,
+                                       &mcm_info, &err_addr, (uint64_t)count);
+                       else
+                               amdgpu_ras_error_statistic_ce_count(err_data,
+                                       &mcm_info, &err_addr, (uint64_t)count);
+               }
        }
 
 out_mca_release:
index b399f1b62887a98432a5d8a9b9ac34e913384004..b964110ed1e05e4f1a55e2659837fe1c3cb601af 100644 (file)
@@ -65,6 +65,7 @@ enum amdgpu_mca_ip {
 enum amdgpu_mca_error_type {
        AMDGPU_MCA_ERROR_TYPE_UE = 0,
        AMDGPU_MCA_ERROR_TYPE_CE,
+       AMDGPU_MCA_ERROR_TYPE_DE,
 };
 
 struct amdgpu_mca_ras_block {
index da48b6da010725a21f9d4261bb59621a50064232..a98e03e0a51f1f741895d253f896e76de29f9aec 100644 (file)
@@ -1398,7 +1398,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
                goto error_fini;
        }
 
-       ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
+       ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
        r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
        if (r) {
                DRM_ERROR("failed to map ctx meta data\n");
@@ -1565,9 +1565,9 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
-
-       debugfs_create_file("amdgpu_mes_event_log", 0444, root,
-                           adev, &amdgpu_debugfs_mes_event_log_fops);
+       if (adev->enable_mes)
+               debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+                                   adev, &amdgpu_debugfs_mes_event_log_fops);
 
 #endif
 }
index 0328616473f80af861cd4a1176afc0221eee7db9..d9e5eb24341d3857508b05bf0cd92bc539f45b33 100644 (file)
@@ -291,21 +291,22 @@ static int psp_memory_training_init(struct psp_context *psp)
        struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
 
        if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
-               DRM_DEBUG("memory training is not supported!\n");
+               dev_dbg(psp->adev->dev, "memory training is not supported!\n");
                return 0;
        }
 
        ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
        if (ctx->sys_cache == NULL) {
-               DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
+               dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
                ret = -ENOMEM;
                goto Err_out;
        }
 
-       DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
-                 ctx->train_data_size,
-                 ctx->p2c_train_data_offset,
-                 ctx->c2p_train_data_offset);
+       dev_dbg(psp->adev->dev,
+               "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+               ctx->train_data_size,
+               ctx->p2c_train_data_offset,
+               ctx->c2p_train_data_offset);
        ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
        return 0;
 
@@ -407,7 +408,7 @@ static int psp_sw_init(void *handle)
 
        psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!psp->cmd) {
-               DRM_ERROR("Failed to allocate memory to command buffer!\n");
+               dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
                ret = -ENOMEM;
        }
 
@@ -454,13 +455,13 @@ static int psp_sw_init(void *handle)
        if (mem_training_ctx->enable_mem_training) {
                ret = psp_memory_training_init(psp);
                if (ret) {
-                       DRM_ERROR("Failed to initialize memory training!\n");
+                       dev_err(adev->dev, "Failed to initialize memory training!\n");
                        return ret;
                }
 
                ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
                if (ret) {
-                       DRM_ERROR("Failed to process memory training!\n");
+                       dev_err(adev->dev, "Failed to process memory training!\n");
                        return ret;
                }
        }
@@ -675,9 +676,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
         */
        if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
                if (ucode)
-                       DRM_WARN("failed to load ucode %s(0x%X) ",
-                                 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
-               DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+                       dev_warn(psp->adev->dev,
+                                "failed to load ucode %s(0x%X) ",
+                                amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+               dev_warn(psp->adev->dev,
+                        "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
                         psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
                         psp->cmd_buf_mem->resp.status);
                /* If any firmware (including CAP) load fails under SRIOV, it should
@@ -807,7 +810,7 @@ static int psp_tmr_init(struct psp_context *psp)
            psp->fw_pri_buf) {
                ret = psp_load_toc(psp, &tmr_size);
                if (ret) {
-                       DRM_ERROR("Failed to load toc\n");
+                       dev_err(psp->adev->dev, "Failed to load toc\n");
                        return ret;
                }
        }
@@ -855,7 +858,7 @@ static int psp_tmr_load(struct psp_context *psp)
 
        psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
        if (psp->tmr_bo)
-               DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
+               dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
                         amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
@@ -1113,7 +1116,7 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
        psp_prep_reg_prog_cmd_buf(cmd, reg, value);
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
        if (ret)
-               DRM_ERROR("PSP failed to program reg id %d", reg);
+               dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
 
        release_psp_cmd_buf(psp);
 
@@ -1526,22 +1529,22 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
        switch (ras_cmd->ras_status) {
        case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
                dev_warn(psp->adev->dev,
-                               "RAS WARNING: cmd failed due to unsupported ip\n");
+                        "RAS WARNING: cmd failed due to unsupported ip\n");
                break;
        case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
                dev_warn(psp->adev->dev,
-                               "RAS WARNING: cmd failed due to unsupported error injection\n");
+                        "RAS WARNING: cmd failed due to unsupported error injection\n");
                break;
        case TA_RAS_STATUS__SUCCESS:
                break;
        case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
                if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
                        dev_warn(psp->adev->dev,
-                                       "RAS WARNING: Inject error to critical region is not allowed\n");
+                                "RAS WARNING: Inject error to critical region is not allowed\n");
                break;
        default:
                dev_warn(psp->adev->dev,
-                               "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+                        "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
                break;
        }
 }
@@ -1565,7 +1568,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
                return ret;
 
        if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
-               DRM_WARN("RAS: Unsupported Interface");
+               dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
                return -EINVAL;
        }
 
@@ -1715,7 +1718,7 @@ int psp_ras_initialize(struct psp_context *psp)
                psp->ras_context.context.initialized = true;
        else {
                if (ras_cmd->ras_status)
-                       dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+                       dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
 
                /* fail to load RAS TA */
                psp->ras_context.context.initialized = false;
@@ -1779,6 +1782,31 @@ int psp_ras_trigger_error(struct psp_context *psp,
 
        return 0;
 }
+
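+/**
+ * psp_ras_query_address - translate an error address through the RAS TA
+ * @psp: PSP context
+ * @addr_in: address query input passed to the RAS TA
+ * @addr_out: translated address returned by the RAS TA
+ *
+ * Sends TA_RAS_COMMAND__QUERY_ADDRESS to the RAS TA. Returns 0 on
+ * success, -EINVAL if the RAS context is not initialized or the TA
+ * reports a failure.
+ */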
+int psp_ras_query_address(struct psp_context *psp,
+                         struct ta_ras_query_address_input *addr_in,
+                         struct ta_ras_query_address_output *addr_out)
+{
+       struct ta_ras_shared_memory *ras_cmd;
+       int ret;
+
+       if (!psp->ras_context.context.initialized)
+               return -EINVAL;
+
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+       memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+       ras_cmd->cmd_id = TA_RAS_COMMAND__QUERY_ADDRESS;
+       ras_cmd->ras_in_message.address = *addr_in;
+
+       ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+       if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+               return -EINVAL;
+
+       *addr_out = ras_cmd->ras_out_message.address;
+
+       return 0;
+}
 // ras end
 
 // HDCP start
@@ -2125,19 +2153,14 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
        return ret;
 }
 
-int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
 {
-       struct psp_context *psp = &adev->psp;
-       int ret = 0;
-
-       if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
-               return 0;
-
        if (psp->funcs &&
-           psp->funcs->query_boot_status)
-               ret = psp->funcs->query_boot_status(psp);
-
-       return ret;
+           psp->funcs->get_ras_capability) {
+               return psp->funcs->get_ras_capability(psp);
+       } else {
+               return false;
+       }
 }
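
One sketch of how a PSP backend would opt in to the new callback; the naming below is illustrative, not a claim about which backend files this series touches:

/* Hypothetical backend wiring: expose the RAS-capability query in the
 * psp_funcs table in place of the removed query_boot_status hook. */
static bool example_psp_get_ras_capability(struct psp_context *psp)
{
	/* e.g. read a capability bit latched by the bootloader */
	return false;	/* placeholder */
}

static const struct psp_funcs example_psp_funcs = {
	/* ... other callbacks elided ... */
	.get_ras_capability = example_psp_get_ras_capability,
};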
 
 static int psp_hw_start(struct psp_context *psp)
@@ -2150,7 +2173,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_kdb != NULL)) {
                        ret = psp_bootloader_load_kdb(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load kdb failed!\n");
+                               dev_err(adev->dev, "PSP load kdb failed!\n");
                                return ret;
                        }
                }
@@ -2159,7 +2182,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_spl != NULL)) {
                        ret = psp_bootloader_load_spl(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load spl failed!\n");
+                               dev_err(adev->dev, "PSP load spl failed!\n");
                                return ret;
                        }
                }
@@ -2168,7 +2191,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_sysdrv != NULL)) {
                        ret = psp_bootloader_load_sysdrv(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load sys drv failed!\n");
+                               dev_err(adev->dev, "PSP load sys drv failed!\n");
                                return ret;
                        }
                }
@@ -2177,7 +2200,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_soc_drv != NULL)) {
                        ret = psp_bootloader_load_soc_drv(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load soc drv failed!\n");
+                               dev_err(adev->dev, "PSP load soc drv failed!\n");
                                return ret;
                        }
                }
@@ -2186,7 +2209,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_intf_drv != NULL)) {
                        ret = psp_bootloader_load_intf_drv(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load intf drv failed!\n");
+                               dev_err(adev->dev, "PSP load intf drv failed!\n");
                                return ret;
                        }
                }
@@ -2195,7 +2218,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_dbg_drv != NULL)) {
                        ret = psp_bootloader_load_dbg_drv(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load dbg drv failed!\n");
+                               dev_err(adev->dev, "PSP load dbg drv failed!\n");
                                return ret;
                        }
                }
@@ -2204,7 +2227,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_ras_drv != NULL)) {
                        ret = psp_bootloader_load_ras_drv(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load ras_drv failed!\n");
+                               dev_err(adev->dev, "PSP load ras_drv failed!\n");
                                return ret;
                        }
                }
@@ -2213,7 +2236,7 @@ static int psp_hw_start(struct psp_context *psp)
                    (psp->funcs->bootloader_load_sos != NULL)) {
                        ret = psp_bootloader_load_sos(psp);
                        if (ret) {
-                               DRM_ERROR("PSP load sos failed!\n");
+                               dev_err(adev->dev, "PSP load sos failed!\n");
                                return ret;
                        }
                }
@@ -2221,7 +2244,7 @@ static int psp_hw_start(struct psp_context *psp)
 
        ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
        if (ret) {
-               DRM_ERROR("PSP create ring failed!\n");
+               dev_err(adev->dev, "PSP create ring failed!\n");
                return ret;
        }
 
@@ -2231,7 +2254,7 @@ static int psp_hw_start(struct psp_context *psp)
        if (!psp_boottime_tmr(psp)) {
                ret = psp_tmr_init(psp);
                if (ret) {
-                       DRM_ERROR("PSP tmr init failed!\n");
+                       dev_err(adev->dev, "PSP tmr init failed!\n");
                        return ret;
                }
        }
@@ -2250,7 +2273,7 @@ skip_pin_bo:
 
        ret = psp_tmr_load(psp);
        if (ret) {
-               DRM_ERROR("PSP load tmr failed!\n");
+               dev_err(adev->dev, "PSP load tmr failed!\n");
                return ret;
        }
 
@@ -2518,7 +2541,8 @@ static void psp_print_fw_hdr(struct psp_context *psp,
        }
 }
 
-static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
+static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
+                                      struct amdgpu_firmware_info *ucode,
                                       struct psp_gfx_cmd_resp *cmd)
 {
        int ret;
@@ -2531,7 +2555,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
 
        ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
        if (ret)
-               DRM_ERROR("Unknown firmware type\n");
+               dev_err(psp->adev->dev, "Unknown firmware type\n");
 
        return ret;
 }
@@ -2542,7 +2566,7 @@ int psp_execute_ip_fw_load(struct psp_context *psp,
        int ret = 0;
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
+       ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
        if (!ret) {
                ret = psp_cmd_submit_buf(psp, ucode, cmd,
                                         psp->fence_buf_mc_addr);
@@ -2601,13 +2625,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
              amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
                ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
                if (ret)
-                       DRM_WARN("Failed to set MP1 state prepare for reload\n");
+                       dev_err(adev->dev, "Failed to set MP1 state in preparation for reload\n");
        }
 
        ret = psp_execute_ip_fw_load(psp, ucode);
 
        if (ret)
-               DRM_ERROR("PSP load smu failed!\n");
+               dev_err(adev->dev, "PSP load smu failed!\n");
 
        return ret;
 }
@@ -2712,7 +2736,7 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
                    adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
                        ret = psp_rlc_autoload_start(psp);
                        if (ret) {
-                               DRM_ERROR("Failed to start rlc autoload\n");
+                               dev_err(adev->dev, "Failed to start rlc autoload\n");
                                return ret;
                        }
                }
@@ -2734,7 +2758,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
 
                ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
                if (ret) {
-                       DRM_ERROR("PSP ring init failed!\n");
+                       dev_err(adev->dev, "PSP ring init failed!\n");
                        goto failed;
                }
        }
@@ -2749,13 +2773,13 @@ static int psp_load_fw(struct amdgpu_device *adev)
 
        ret = psp_asd_initialize(psp);
        if (ret) {
-               DRM_ERROR("PSP load asd failed!\n");
+               dev_err(adev->dev, "PSP load asd failed!\n");
                goto failed1;
        }
 
        ret = psp_rl_load(adev);
        if (ret) {
-               DRM_ERROR("PSP load RL failed!\n");
+               dev_err(adev->dev, "PSP load RL failed!\n");
                goto failed1;
        }
 
@@ -2775,7 +2799,7 @@ static int psp_load_fw(struct amdgpu_device *adev)
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
-                                       "RAS: Failed to initialize RAS\n");
+                               "RAS: Failed to initialize RAS\n");
 
                ret = psp_hdcp_initialize(psp);
                if (ret)
@@ -2828,7 +2852,7 @@ static int psp_hw_init(void *handle)
 
        ret = psp_load_fw(adev);
        if (ret) {
-               DRM_ERROR("PSP firmware loading failed\n");
+               dev_err(adev->dev, "PSP firmware loading failed\n");
                goto failed;
        }
 
@@ -2875,7 +2899,7 @@ static int psp_suspend(void *handle)
            psp->xgmi_context.context.initialized) {
                ret = psp_xgmi_terminate(psp);
                if (ret) {
-                       DRM_ERROR("Failed to terminate xgmi ta\n");
+                       dev_err(adev->dev, "Failed to terminate xgmi ta\n");
                        goto out;
                }
        }
@@ -2883,46 +2907,46 @@ static int psp_suspend(void *handle)
        if (psp->ta_fw) {
                ret = psp_ras_terminate(psp);
                if (ret) {
-                       DRM_ERROR("Failed to terminate ras ta\n");
+                       dev_err(adev->dev, "Failed to terminate ras ta\n");
                        goto out;
                }
                ret = psp_hdcp_terminate(psp);
                if (ret) {
-                       DRM_ERROR("Failed to terminate hdcp ta\n");
+                       dev_err(adev->dev, "Failed to terminate hdcp ta\n");
                        goto out;
                }
                ret = psp_dtm_terminate(psp);
                if (ret) {
-                       DRM_ERROR("Failed to terminate dtm ta\n");
+                       dev_err(adev->dev, "Failed to terminate dtm ta\n");
                        goto out;
                }
                ret = psp_rap_terminate(psp);
                if (ret) {
-                       DRM_ERROR("Failed to terminate rap ta\n");
+                       dev_err(adev->dev, "Failed to terminate rap ta\n");
                        goto out;
                }
                ret = psp_securedisplay_terminate(psp);
                if (ret) {
-                       DRM_ERROR("Failed to terminate securedisplay ta\n");
+                       dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
                        goto out;
                }
        }
 
        ret = psp_asd_terminate(psp);
        if (ret) {
-               DRM_ERROR("Failed to terminate asd\n");
+               dev_err(adev->dev, "Failed to terminate asd\n");
                goto out;
        }
 
        ret = psp_tmr_terminate(psp);
        if (ret) {
-               DRM_ERROR("Failed to terminate tmr\n");
+               dev_err(adev->dev, "Failed to terminate tmr\n");
                goto out;
        }
 
        ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
        if (ret)
-               DRM_ERROR("PSP ring stop failed\n");
+               dev_err(adev->dev, "PSP ring stop failed\n");
 
 out:
        return ret;
@@ -2934,12 +2958,12 @@ static int psp_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
 
-       DRM_INFO("PSP is resuming...\n");
+       dev_info(adev->dev, "PSP is resuming...\n");
 
        if (psp->mem_train_ctx.enable_mem_training) {
                ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
                if (ret) {
-                       DRM_ERROR("Failed to process memory training!\n");
+                       dev_err(adev->dev, "Failed to process memory training!\n");
                        return ret;
                }
        }
@@ -2956,7 +2980,7 @@ static int psp_resume(void *handle)
 
        ret = psp_asd_initialize(psp);
        if (ret) {
-               DRM_ERROR("PSP load asd failed!\n");
+               dev_err(adev->dev, "PSP load asd failed!\n");
                goto failed;
        }
 
@@ -2980,7 +3004,7 @@ static int psp_resume(void *handle)
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
-                                       "RAS: Failed to initialize RAS\n");
+                               "RAS: Failed to initialize RAS\n");
 
                ret = psp_hdcp_initialize(psp);
                if (ret)
@@ -3008,7 +3032,7 @@ static int psp_resume(void *handle)
        return 0;
 
 failed:
-       DRM_ERROR("PSP resume failed\n");
+       dev_err(adev->dev, "PSP resume failed\n");
        mutex_unlock(&adev->firmware.mutex);
        return ret;
 }
@@ -3069,9 +3093,11 @@ int psp_ring_cmd_submit(struct psp_context *psp,
                write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
        /* Check invalid write_frame ptr address */
        if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
-               DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
-                         ring_buffer_start, ring_buffer_end, write_frame);
-               DRM_ERROR("write_frame is pointing to address out of bounds\n");
+               dev_err(adev->dev,
+                       "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+                       ring_buffer_start, ring_buffer_end, write_frame);
+               dev_err(adev->dev,
+                       "write_frame is pointing to address out of bounds\n");
                return -EINVAL;
        }
 
@@ -3597,7 +3623,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
        int ret;
 
        if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
-               DRM_INFO("PSP block is not ready yet.");
+               dev_info(adev->dev, "PSP block is not ready yet.\n");
                return -EBUSY;
        }
 
@@ -3606,7 +3632,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
        mutex_unlock(&adev->psp.mutex);
 
        if (ret) {
-               DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+               dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
                return ret;
        }
 
@@ -3628,7 +3654,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
        void *fw_pri_cpu_addr;
 
        if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
-               DRM_INFO("PSP block is not ready yet.");
+               dev_err(adev->dev, "PSP block is not ready yet.\n");
                return -EBUSY;
        }
 
@@ -3661,7 +3687,7 @@ rel_buf:
        release_firmware(usbc_pd_fw);
 fail:
        if (ret) {
-               DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+               dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
                count = ret;
        }
 
@@ -3708,7 +3734,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
 
        /* Safeguard against memory drain */
        if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
-               dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
+               dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
                kvfree(adev->psp.vbflash_tmp_buf);
                adev->psp.vbflash_tmp_buf = NULL;
                adev->psp.vbflash_image_size = 0;
@@ -3727,7 +3753,7 @@ static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
        adev->psp.vbflash_image_size += count;
        mutex_unlock(&adev->psp.mutex);
 
-       dev_dbg(adev->dev, "IFWI staged for update");
+       dev_dbg(adev->dev, "IFWI staged for update\n");
 
        return count;
 }
@@ -3747,7 +3773,7 @@ static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
        if (adev->psp.vbflash_image_size == 0)
                return -EINVAL;
 
-       dev_dbg(adev->dev, "PSP IFWI flash process initiated");
+       dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
 
        ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
                                        AMDGPU_GPU_PAGE_SIZE,
@@ -3772,11 +3798,11 @@ rel_buf:
        adev->psp.vbflash_image_size = 0;
 
        if (ret) {
-               dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
+               dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
                return ret;
        }
 
-       dev_dbg(adev->dev, "PSP IFWI flash process done");
+       dev_dbg(adev->dev, "PSP IFWI flash process done\n");
        return 0;
 }
 
index c4d9cbde55b9bc58799aa1acc9fe4eea29d1a98a..9951bdd022dedd2a57d4d659cd7cef68f69539e6 100644 (file)
@@ -134,7 +134,7 @@ struct psp_funcs {
        int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
        int (*vbflash_stat)(struct psp_context *psp);
        int (*fatal_error_recovery_quirk)(struct psp_context *psp);
-       int (*query_boot_status)(struct psp_context *psp);
+       bool (*get_ras_capability)(struct psp_context *psp);
 };
 
 struct ta_funcs {
@@ -502,6 +502,9 @@ int psp_ras_enable_features(struct psp_context *psp,
 int psp_ras_trigger_error(struct psp_context *psp,
                          struct ta_ras_trigger_error_input *info, uint32_t instance_mask);
 int psp_ras_terminate(struct psp_context *psp);
+int psp_ras_query_address(struct psp_context *psp,
+                         struct ta_ras_query_address_input *addr_in,
+                         struct ta_ras_query_address_output *addr_out);
 
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -538,7 +541,5 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
 int is_psp_fw_valid(struct psp_bin_desc bin);
 
 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
-
-int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
-
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
 #endif
index 468a67b302d4c140c9d7cf09bc92566404180e75..ca5c86e5f7cd671a651d61357ab52d3c53a1e7f3 100644 (file)
@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
                }
        }
 
-       if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+       if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
                ret = -EFAULT;
 
 err_free_shared_buf:
index 31823a30dea217b5af3a8a36624a01fab70b48a5..46f3d1013e8ced2183a5c6ac4c2dc6f8ebef1932 100644 (file)
@@ -39,6 +39,7 @@
 #include "nbio_v7_9.h"
 #include "atom.h"
 #include "amdgpu_reset.h"
+#include "amdgpu_psp.h"
 
 #ifdef CONFIG_X86_MCE_AMD
 #include <asm/mce.h>
@@ -73,6 +74,8 @@ const char *ras_block_string[] = {
        "mca",
        "vcn",
        "jpeg",
+       "ih",
+       "mpio",
 };
 
 const char *ras_mca_block_string[] = {
@@ -94,7 +97,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
        if (!ras_block)
                return "NULL";
 
-       if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+       if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
+           ras_block->block >= ARRAY_SIZE(ras_block_string))
                return "OUT OF RANGE";
 
        if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
@@ -116,6 +120,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
 
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC  100  //ms
+
 enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -628,8 +634,12 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }
 
-       return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
-                         "ce", info.ce_count);
+       if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
+               return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+                               "ce", info.ce_count, "de", info.de_count);
+       else
+               return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+                               "ce", info.ce_count);
 }
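
With this change, reading a UMC block's RAS sysfs node reports the new deferred-error counter alongside ue/ce. Given the format strings above, the emitted text takes this shape (counts illustrative); non-UMC blocks keep the two-line ue/ce form:

    ue: 0
    ce: 2
    de: 1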
 
 /* obj begin */
@@ -1036,7 +1046,8 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                                              struct ras_manager *ras_mgr,
                                              struct ras_err_data *err_data,
                                              const char *blk_name,
-                                             bool is_ue)
+                                             bool is_ue,
+                                             bool is_de)
 {
        struct amdgpu_smuio_mcm_config_info *mcm_info;
        struct ras_err_node *err_node;
@@ -1065,25 +1076,50 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                }
 
        } else {
-               for_each_ras_error(err_node, err_data) {
-                       err_info = &err_node->err_info;
-                       mcm_info = &err_info->mcm_info;
-                       if (err_info->ce_count) {
+               if (is_de) {
+                       for_each_ras_error(err_node, err_data) {
+                               err_info = &err_node->err_info;
+                               mcm_info = &err_info->mcm_info;
+                               if (err_info->de_count) {
+                                       dev_info(adev->dev, "socket: %d, die: %d, "
+                                               "%lld new deferred hardware errors detected in %s block\n",
+                                               mcm_info->socket_id,
+                                               mcm_info->die_id,
+                                               err_info->de_count,
+                                               blk_name);
+                               }
+                       }
+
+                       for_each_ras_error(err_node, &ras_mgr->err_data) {
+                               err_info = &err_node->err_info;
+                               mcm_info = &err_info->mcm_info;
                                dev_info(adev->dev, "socket: %d, die: %d, "
-                                        "%lld new correctable hardware errors detected in %s block\n",
-                                        mcm_info->socket_id,
-                                        mcm_info->die_id,
-                                        err_info->ce_count,
-                                        blk_name);
+                                       "%lld deferred hardware errors detected in total in %s block\n",
+                                       mcm_info->socket_id, mcm_info->die_id,
+                                       err_info->de_count, blk_name);
+                       }
+               } else {
+                       for_each_ras_error(err_node, err_data) {
+                               err_info = &err_node->err_info;
+                               mcm_info = &err_info->mcm_info;
+                               if (err_info->ce_count) {
+                                       dev_info(adev->dev, "socket: %d, die: %d, "
+                                               "%lld new correctable hardware errors detected in %s block\n",
+                                               mcm_info->socket_id,
+                                               mcm_info->die_id,
+                                               err_info->ce_count,
+                                               blk_name);
+                               }
                        }
-               }
 
-               for_each_ras_error(err_node, &ras_mgr->err_data) {
-                       err_info = &err_node->err_info;
-                       mcm_info = &err_info->mcm_info;
-                       dev_info(adev->dev, "socket: %d, die: %d, "
-                                "%lld correctable hardware errors detected in total in %s block\n",
-                                mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
+                       for_each_ras_error(err_node, &ras_mgr->err_data) {
+                               err_info = &err_node->err_info;
+                               mcm_info = &err_info->mcm_info;
+                               dev_info(adev->dev, "socket: %d, die: %d, "
+                                       "%lld correctable hardware errors detected in total in %s block\n",
+                                       mcm_info->socket_id, mcm_info->die_id,
+                                       err_info->ce_count, blk_name);
+                       }
                }
        }
 }
@@ -1102,7 +1138,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 
        if (err_data->ce_count) {
                if (err_data_has_source_info(err_data)) {
-                       amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
+                       amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+                                                         blk_name, false, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
@@ -1124,7 +1161,8 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
 
        if (err_data->ue_count) {
                if (err_data_has_source_info(err_data)) {
-                       amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
+                       amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+                                                         blk_name, true, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
@@ -1144,6 +1182,28 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
                }
        }
 
+       if (err_data->de_count) {
+               if (err_data_has_source_info(err_data)) {
+                       amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
+                                                         blk_name, false, true);
+               } else if (!adev->aid_mask &&
+                          adev->smuio.funcs &&
+                          adev->smuio.funcs->get_socket_id &&
+                          adev->smuio.funcs->get_die_id) {
+                       dev_info(adev->dev, "socket: %d, die: %d, "
+                                "%ld deferred hardware errors "
+                                "detected in %s block\n",
+                                adev->smuio.funcs->get_socket_id(adev),
+                                adev->smuio.funcs->get_die_id(adev),
+                                ras_mgr->err_data.de_count,
+                                blk_name);
+               } else {
+                       dev_info(adev->dev, "%ld deferred hardware errors "
+                                "detected in %s block\n",
+                                ras_mgr->err_data.de_count,
+                                blk_name);
+               }
+       }
 }
 
 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
@@ -1154,7 +1214,8 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
        if (err_data_has_source_info(err_data)) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
-
+                       amdgpu_ras_error_statistic_de_count(&obj->err_data,
+                                       &err_info->mcm_info, NULL, err_info->de_count);
                        amdgpu_ras_error_statistic_ce_count(&obj->err_data,
                                        &err_info->mcm_info, NULL, err_info->ce_count);
                        amdgpu_ras_error_statistic_ue_count(&obj->err_data,
@@ -1164,9 +1225,72 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
                /* for legacy asic path which doesn't have error source info */
                obj->err_data.ue_count += err_data->ue_count;
                obj->err_data.ce_count += err_data->ce_count;
+               obj->err_data.de_count += err_data->de_count;
        }
 }
 
+static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+       struct ras_common_if head;
+
+       memset(&head, 0, sizeof(head));
+       head.block = blk;
+
+       return amdgpu_ras_find_obj(adev, &head);
+}
+
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                       const struct aca_info *aca_info, void *data)
+{
+       struct ras_manager *obj;
+
+       obj = get_ras_manager(adev, blk);
+       if (!obj)
+               return -EINVAL;
+
+       return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
+}
+
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+       struct ras_manager *obj;
+
+       obj = get_ras_manager(adev, blk);
+       if (!obj)
+               return -EINVAL;
+
+       amdgpu_aca_remove_handle(&obj->aca_handle);
+
+       return 0;
+}
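
For reference, a hedged sketch of an IP block binding its ACA handle from a ras_late_init callback; the aca_info field names (hwip, mask, bank_ops) and the ACA_* constants are assumed from the new amdgpu_aca.h in this series, and the ops table is hypothetical:

/* Illustrative only: bind a UMC-style ACA handle. */
static const struct aca_bank_ops example_umc_aca_bank_ops; /* hypothetical, ops elided */

static const struct aca_info example_umc_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,			/* assumed enum */
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,	/* assumed masks */
	.bank_ops = &example_umc_aca_bank_ops,
};

static int example_umc_ras_late_init(struct amdgpu_device *adev,
				     struct ras_common_if *ras_block)
{
	return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				   &example_umc_aca_info, NULL);
}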
+
+static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                                        enum aca_error_type type, struct ras_err_data *err_data)
+{
+       struct ras_manager *obj;
+
+       obj = get_ras_manager(adev, blk);
+       if (!obj)
+               return -EINVAL;
+
+       return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
+}
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+                                 struct aca_handle *handle, char *buf, void *data)
+{
+       struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
+       struct ras_query_if info = {
+               .head = obj->head,
+       };
+
+       if (amdgpu_ras_query_error_status(obj->adev, &info))
+               return -EINVAL;
+
+       return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+                         "ce", info.ce_count);
+}
+
 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
                                                struct ras_query_if *info,
                                                struct ras_err_data *err_data,
@@ -1174,6 +1298,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
 {
        enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
        struct amdgpu_ras_block_object *block_obj = NULL;
+       int ret;
 
        if (blk == AMDGPU_RAS_BLOCK_COUNT)
                return -EINVAL;
@@ -1203,9 +1328,19 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
                        }
                }
        } else {
-               /* FIXME: add code to check return value later */
-               amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
-               amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+               if (amdgpu_aca_is_enabled(adev)) {
+                       ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
+                       if (ret)
+                               return ret;
+
+                       ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
+                       if (ret)
+                               return ret;
+               } else {
+                       /* FIXME: add code to check return value later */
+                       amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
+                       amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+               }
        }
 
        return 0;
@@ -1239,6 +1374,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i
 
        info->ue_count = obj->err_data.ue_count;
        info->ce_count = obj->err_data.ce_count;
+       info->de_count = obj->err_data.de_count;
 
        amdgpu_ras_error_generate_report(adev, info, &err_data);
 
@@ -1254,6 +1390,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+       const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
        struct amdgpu_hive_info *hive;
        int hive_ras_recovery = 0;
 
@@ -1264,7 +1401,7 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
        }
 
        if (!amdgpu_ras_is_supported(adev, block) ||
-           !amdgpu_ras_get_mca_debug_mode(adev))
+           !amdgpu_ras_get_aca_debug_mode(adev))
                return -EOPNOTSUPP;
 
        hive = amdgpu_get_xgmi_hive(adev);
@@ -1276,7 +1413,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
        /* skip ras error reset in gpu reset */
        if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
            hive_ras_recovery) &&
-           mca_funcs && mca_funcs->mca_set_debug_mode)
+           ((smu_funcs && smu_funcs->set_debug_mode) ||
+            (mca_funcs && mca_funcs->mca_set_debug_mode)))
                return -EOPNOTSUPP;
 
        if (block_obj->hw_ops->reset_ras_error_count)
@@ -1772,7 +1910,10 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
                }
        }
 
-       amdgpu_mca_smu_debugfs_init(adev, dir);
+       if (amdgpu_aca_is_enabled(adev))
+               amdgpu_aca_smu_debugfs_init(adev, dir);
+       else
+               amdgpu_mca_smu_debugfs_init(adev, dir);
 }
 
 /* debugfs end */
@@ -1900,7 +2041,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
                }
        }
 
-       amdgpu_umc_poison_handler(adev, false);
+       amdgpu_umc_poison_handler(adev, obj->head.block, false);
 
        if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
                poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
@@ -1951,6 +2092,7 @@ static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
                 */
                obj->err_data.ue_count += err_data.ue_count;
                obj->err_data.ce_count += err_data.ce_count;
+               obj->err_data.de_count += err_data.de_count;
        }
 
        amdgpu_ras_error_data_fini(&err_data);
@@ -2520,6 +2662,32 @@ static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
        }
 }
 
+static int amdgpu_ras_page_retirement_thread(void *param)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)param;
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+       while (!kthread_should_stop()) {
+
+               wait_event_interruptible(con->page_retirement_wq,
+                               kthread_should_stop() ||
+                               atomic_read(&con->page_retirement_req_cnt));
+
+               if (kthread_should_stop())
+                       break;
+
+               dev_info(adev->dev, "Start processing page retirement. request:%d\n",
+                       atomic_read(&con->page_retirement_req_cnt));
+
+               atomic_dec(&con->page_retirement_req_cnt);
+
+               amdgpu_umc_bad_page_polling_timeout(adev,
+                               false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
+       }
+
+       return 0;
+}
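
The producer side is the poison/interrupt path; a minimal sketch using only the fields initialized below (a request is counted, then the worker is woken):

/* Sketch: queue one page-retirement request and wake the kthread. */
static void example_queue_page_retirement(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	atomic_inc(&con->page_retirement_req_cnt);
	wake_up(&con->page_retirement_wq);
}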
+
 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -2583,6 +2751,16 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
                }
        }
 
+       mutex_init(&con->page_retirement_lock);
+       init_waitqueue_head(&con->page_retirement_wq);
+       atomic_set(&con->page_retirement_req_cnt, 0);
+       con->page_retirement_thread =
+               kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
+       if (IS_ERR(con->page_retirement_thread)) {
+               con->page_retirement_thread = NULL;
+               dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
+       }
+
 #ifdef CONFIG_X86_MCE_AMD
        if ((adev->asic_type == CHIP_ALDEBARAN) &&
            (adev->gmc.xgmi.connected_to_cpu))
@@ -2618,6 +2796,11 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
        if (!data)
                return 0;
 
+       if (con->page_retirement_thread)
+               kthread_stop(con->page_retirement_thread);
+
+       atomic_set(&con->page_retirement_req_cnt, 0);
+
        cancel_work_sync(&con->recovery_work);
 
        mutex_lock(&con->recovery_lock);
@@ -2679,6 +2862,87 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
 }
 
+/* Query RAS capability via the atomfirmware interface */
+static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
+{
+       /* mem_ecc cap */
+       if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+               dev_info(adev->dev, "MEM ECC is active.\n");
+               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
+                                        1 << AMDGPU_RAS_BLOCK__DF);
+       } else {
+               dev_info(adev->dev, "MEM ECC is not present.\n");
+       }
+
+       /* sram_ecc cap */
+       if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+               dev_info(adev->dev, "SRAM ECC is active.\n");
+               if (!amdgpu_sriov_vf(adev))
+                       adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+                                                 1 << AMDGPU_RAS_BLOCK__DF);
+               else
+                       adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+                                                1 << AMDGPU_RAS_BLOCK__SDMA |
+                                                1 << AMDGPU_RAS_BLOCK__GFX);
+
+               /*
+                * VCN/JPEG RAS can be supported on both bare metal and
+                * SRIOV environment
+                */
+               if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
+                   amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
+                   amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
+                       adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+                                                1 << AMDGPU_RAS_BLOCK__JPEG);
+               else
+                       adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+                                                 1 << AMDGPU_RAS_BLOCK__JPEG);
+
+               /*
+                * XGMI RAS is not supported if xgmi num physical nodes
+                * is zero
+                */
+               if (!adev->gmc.xgmi.num_physical_nodes)
+                       adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
+       } else {
+               dev_info(adev->dev, "SRAM ECC is not present.\n");
+       }
+}
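
Whichever path fills ras_hw_enabled, consumers test support per block with a simple bit check; a sketch consistent with the `1 << AMDGPU_RAS_BLOCK__*` encoding used above (the real amdgpu_ras_is_supported() adds further special cases):

/* Sketch: basic per-block support test against the enabled mask. */
static bool example_block_enabled(struct amdgpu_device *adev,
				  enum amdgpu_ras_block block)
{
	return !!(adev->ras_enabled & (1 << block));
}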
+
+/* Query poison mode from umc/df IP callbacks */
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       bool df_poison, umc_poison;
+
+       /* poison setting is useless on SRIOV guest */
+       if (amdgpu_sriov_vf(adev) || !con)
+               return;
+
+       /* Init poison supported flag, the default value is false */
+       if (adev->gmc.xgmi.connected_to_cpu ||
+           adev->gmc.is_app_apu) {
+               /* enabled by default when GPU is connected to CPU */
+               con->poison_supported = true;
+       } else if (adev->df.funcs &&
+           adev->df.funcs->query_ras_poison_mode &&
+           adev->umc.ras &&
+           adev->umc.ras->query_ras_poison_mode) {
+               df_poison =
+                       adev->df.funcs->query_ras_poison_mode(adev);
+               umc_poison =
+                       adev->umc.ras->query_ras_poison_mode(adev);
+
+               /* Only when poison is set in both DF and UMC can we support it */
+               if (df_poison && umc_poison)
+                       con->poison_supported = true;
+               else if (df_poison != umc_poison)
+                       dev_warn(adev->dev,
+                               "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+                               df_poison, umc_poison);
+       }
+}
+
 /*
  * check hardware's ras ability which will be saved in hw_supported.
  * if hardware does not support ras, we can skip some ras initialization and
@@ -2695,49 +2959,13 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
        if (!amdgpu_ras_asic_supported(adev))
                return;
 
-       if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
-               if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
-                       dev_info(adev->dev, "MEM ECC is active.\n");
-                       adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
-                                                  1 << AMDGPU_RAS_BLOCK__DF);
-               } else {
-                       dev_info(adev->dev, "MEM ECC is not presented.\n");
-               }
-
-               if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
-                       dev_info(adev->dev, "SRAM ECC is active.\n");
-                       if (!amdgpu_sriov_vf(adev))
-                               adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
-                                                           1 << AMDGPU_RAS_BLOCK__DF);
-                       else
-                               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
-                                                               1 << AMDGPU_RAS_BLOCK__SDMA |
-                                                               1 << AMDGPU_RAS_BLOCK__GFX);
-
-                       /* VCN/JPEG RAS can be supported on both bare metal and
-                        * SRIOV environment
-                        */
-                       if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
-                                   IP_VERSION(2, 6, 0) ||
-                           amdgpu_ip_version(adev, VCN_HWIP, 0) ==
-                                   IP_VERSION(4, 0, 0) ||
-                           amdgpu_ip_version(adev, VCN_HWIP, 0) ==
-                                   IP_VERSION(4, 0, 3))
-                               adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
-                                                       1 << AMDGPU_RAS_BLOCK__JPEG);
-                       else
-                               adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
-                                                       1 << AMDGPU_RAS_BLOCK__JPEG);
+       /* query ras capability from psp */
+       if (amdgpu_psp_get_ras_capability(&adev->psp))
+               goto init_ras_enabled_flag;
 
-                       /*
-                        * XGMI RAS is not supported if xgmi num physical nodes
-                        * is zero
-                        */
-                       if (!adev->gmc.xgmi.num_physical_nodes)
-                               adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
-               } else {
-                       dev_info(adev->dev, "SRAM ECC is not presented.\n");
-               }
+       /* query ras capability from vbios */
+       if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+               amdgpu_ras_query_ras_capablity_from_vbios(adev);
        } else {
                /* driver only manages a few IP blocks RAS feature
                 * when GPU is connected cpu through XGMI */
@@ -2746,13 +2974,21 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
                                           1 << AMDGPU_RAS_BLOCK__MMHUB);
        }
 
+       /* apply asic specific settings (vega20 only for now) */
        amdgpu_ras_get_quirks(adev);
 
+       /* query poison mode from umc/df ip callback */
+       amdgpu_ras_query_poison_mode(adev);
+
+init_ras_enabled_flag:
        /* hw_supported needs to be aligned with RAS block mask. */
        adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
 
        adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
                adev->ras_hw_enabled & amdgpu_ras_mask;
+
+       /* aca is disabled by default */
+       adev->aca.is_enabled = false;
 }
 
 static void amdgpu_ras_counte_dw(struct work_struct *work)
@@ -2780,39 +3016,6 @@ Out:
        pm_runtime_put_autosuspend(dev->dev);
 }
 
-static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
-{
-       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       bool df_poison, umc_poison;
-
-       /* poison setting is useless on SRIOV guest */
-       if (amdgpu_sriov_vf(adev) || !con)
-               return;
-
-       /* Init poison supported flag, the default value is false */
-       if (adev->gmc.xgmi.connected_to_cpu ||
-           adev->gmc.is_app_apu) {
-               /* enabled by default when GPU is connected to CPU */
-               con->poison_supported = true;
-       } else if (adev->df.funcs &&
-           adev->df.funcs->query_ras_poison_mode &&
-           adev->umc.ras &&
-           adev->umc.ras->query_ras_poison_mode) {
-               df_poison =
-                       adev->df.funcs->query_ras_poison_mode(adev);
-               umc_poison =
-                       adev->umc.ras->query_ras_poison_mode(adev);
-
-               /* Only poison is set in both DF and UMC, we can support it */
-               if (df_poison && umc_poison)
-                       con->poison_supported = true;
-               else if (df_poison != umc_poison)
-                       dev_warn(adev->dev,
-                               "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
-                               df_poison, umc_poison);
-       }
-}
-
 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
 {
        return  amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0 |
@@ -2917,12 +3120,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
                        goto release_con;
        }
 
-       amdgpu_ras_query_poison_mode(adev);
-
        /* Packed socket_id to ras feature mask bits[31:29] */
        if (adev->smuio.funcs &&
            adev->smuio.funcs->get_socket_id)
-               con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << 29);
+               con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
+                                       AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
 
        /* Get RAS schema for particular SOC */
        con->schema = amdgpu_get_ras_schema(adev);
@@ -3128,7 +3330,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
 
        amdgpu_ras_disable_all_features(adev, 0);
        /* Make sure all ras objects are disabled. */
-       if (con->features)
+       if (AMDGPU_RAS_GET_FEATURES(con->features))
                amdgpu_ras_disable_all_features(adev, 1);
 }
 
@@ -3142,15 +3344,29 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       amdgpu_ras_set_mca_debug_mode(adev, false);
+       if (amdgpu_aca_is_enabled(adev)) {
+               if (amdgpu_in_reset(adev))
+                       r = amdgpu_aca_reset(adev);
+               else
+                       r = amdgpu_aca_init(adev);
+               if (r)
+                       return r;
+
+               amdgpu_ras_set_aca_debug_mode(adev, false);
+       } else {
+               amdgpu_ras_set_mca_debug_mode(adev, false);
+       }
 
        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
-               if (!node->ras_obj) {
+               obj = node->ras_obj;
+               if (!obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }
 
-               obj = node->ras_obj;
+               if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
+                       continue;
+
                if (obj->ras_late_init) {
                        r = obj->ras_late_init(adev, &obj->ras_comm);
                        if (r) {
@@ -3175,7 +3391,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
 
 
        /* Need disable ras on all IPs here before ip [hw/sw]fini */
-       if (con->features)
+       if (AMDGPU_RAS_GET_FEATURES(con->features))
                amdgpu_ras_disable_all_features(adev, 0);
        amdgpu_ras_recovery_fini(adev);
        return 0;
@@ -3208,10 +3424,13 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
        amdgpu_ras_fs_fini(adev);
        amdgpu_ras_interrupt_remove_all(adev);
 
-       WARN(con->features, "Feature mask is not cleared");
+       if (amdgpu_aca_is_enabled(adev))
+               amdgpu_aca_fini(adev);
 
-       if (con->features)
-               amdgpu_ras_disable_all_features(adev, 1);
+       WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
+
+       if (AMDGPU_RAS_GET_FEATURES(con->features))
+               amdgpu_ras_disable_all_features(adev, 0);
 
        cancel_delayed_work_sync(&con->ras_counte_delay_work);
 
@@ -3425,22 +3644,41 @@ int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
        if (con) {
                ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
                if (!ret)
-                       con->is_mca_debug_mode = enable;
+                       con->is_aca_debug_mode = enable;
+       }
+
+       return ret;
+}
+
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       int ret = 0;
+
+       if (con) {
+               if (amdgpu_aca_is_enabled(adev))
+                       ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
+               else
+                       ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+               if (!ret)
+                       con->is_aca_debug_mode = enable;
        }
 
        return ret;
 }
 
-bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
 
        if (!con)
                return false;
 
-       if (mca_funcs && mca_funcs->mca_set_debug_mode)
-               return con->is_mca_debug_mode;
+       if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
+           (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
+               return con->is_aca_debug_mode;
        else
                return true;
 }
@@ -3450,15 +3688,16 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+       const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
 
        if (!con) {
                *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
                return false;
        }
 
-       if (mca_funcs && mca_funcs->mca_set_debug_mode)
+       if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
                *error_query_mode =
-                       (con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
+                       (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
        else
                *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
 
@@ -3699,8 +3938,7 @@ static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct
 }
 
 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
-                               struct amdgpu_smuio_mcm_config_info *mcm_info,
-                               struct ras_err_addr *err_addr)
+                               struct amdgpu_smuio_mcm_config_info *mcm_info)
 {
        struct ras_err_node *err_node;
 
@@ -3712,10 +3950,9 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
        if (!err_node)
                return NULL;
 
-       memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
+       INIT_LIST_HEAD(&err_node->err_info.err_addr_list);
 
-       if (err_addr)
-               memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));
+       memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
 
        err_data->err_list_count++;
        list_add_tail(&err_node->node, &err_data->err_node_list);
@@ -3724,6 +3961,29 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
        return &err_node->err_info;
 }
 
+void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr)
+{
+       struct ras_err_addr *mca_err_addr;
+
+       mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
+       if (!mca_err_addr)
+               return;
+
+       INIT_LIST_HEAD(&mca_err_addr->node);
+
+       mca_err_addr->err_status = err_addr->err_status;
+       mca_err_addr->err_ipid = err_addr->err_ipid;
+       mca_err_addr->err_addr = err_addr->err_addr;
+
+       list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
+}
+
+void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr)
+{
+       list_del(&mca_err_addr->node);
+       kfree(mca_err_addr);
+}
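
Because each recorded address is now a separate allocation on err_addr_list, consumers are expected to drain the list once an err_info has been processed; a minimal sketch using the node field added above:

/* Sketch: release every MCA address recorded for one err_info. */
static void example_flush_err_addrs(struct ras_err_info *err_info)
{
	struct ras_err_addr *addr, *tmp;

	list_for_each_entry_safe(addr, tmp, &err_info->err_addr_list, node)
		amdgpu_ras_del_mca_err_addr(err_info, addr);
}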
+
 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
                struct amdgpu_smuio_mcm_config_info *mcm_info,
                struct ras_err_addr *err_addr, u64 count)
@@ -3736,10 +3996,13 @@ int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
        if (!count)
                return 0;
 
-       err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
+       err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
        if (!err_info)
                return -EINVAL;
 
+       if (err_addr && err_addr->err_status)
+               amdgpu_ras_add_mca_err_addr(err_info, err_addr);
+
        err_info->ue_count += count;
        err_data->ue_count += count;
 
@@ -3758,7 +4021,7 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
        if (!count)
                return 0;
 
-       err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
+       err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
        if (!err_info)
                return -EINVAL;
 
@@ -3767,3 +4030,135 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 
        return 0;
 }
+
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+               struct amdgpu_smuio_mcm_config_info *mcm_info,
+               struct ras_err_addr *err_addr, u64 count)
+{
+       struct ras_err_info *err_info;
+
+       if (!err_data || !mcm_info)
+               return -EINVAL;
+
+       if (!count)
+               return 0;
+
+       err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+       if (!err_info)
+               return -EINVAL;
+
+       if (err_addr && err_addr->err_status)
+               amdgpu_ras_add_mca_err_addr(err_info, err_addr);
+
+       err_info->de_count += count;
+       err_data->de_count += count;
+
+       return 0;
+}
+
+#define mmMP0_SMN_C2PMSG_92    0x1609C
+#define mmMP0_SMN_C2PMSG_126   0x160BE
+static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
+                                                u32 instance, u32 boot_error)
+{
+       u32 socket_id, aid_id, hbm_id;
+       u32 reg_data;
+       u64 reg_addr;
+
+       socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
+       aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
+       hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error);
+
+       /* The pattern for SMN addressing in other SOCs could differ from the
+        * one used for aqua_vanjaram. Revisit this code if that pattern
+        * changes; in that case, replace the aqua_vanjaram implementation
+        * with a more generic helper. */
+       reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+                  aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+       reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+       dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
+               socket_id, aid_id, reg_data);
+
+       if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
+                        socket_id, aid_id, hbm_id);
+
+       if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
+                        socket_id, aid_id);
+
+       if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
+                        socket_id, aid_id);
+
+       if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
+                        socket_id, aid_id);
+
+       if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
+                        socket_id, aid_id);
+
+       if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
+                        socket_id, aid_id);
+
+       if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
+                        socket_id, aid_id, hbm_id);
+
+       if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
+               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
+                        socket_id, aid_id, hbm_id);
+}
+
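The reporting function above only decodes bit fields out of the boot-error word. A small compilable sketch of that decode, assuming AMDGPU_GET_REG_FIELD(x, high, low) is the usual extract-bits-[high:low] helper (the GET_FIELD macro and the sample value below are illustrative):

#include <stdio.h>

/* assumed meaning of AMDGPU_GET_REG_FIELD(x, high, low):
 * extract bits [high:low] of x */
#define GET_FIELD(x, h, l)	(((x) >> (l)) & ((1u << ((h) - (l) + 1)) - 1))

int main(void)
{
	unsigned boot_error = 0x743;	/* hypothetical register value */

	printf("mem training failed: %u\n", GET_FIELD(boot_error, 0, 0));
	printf("fw load failed:      %u\n", GET_FIELD(boot_error, 1, 1));
	printf("socket id:           %u\n", GET_FIELD(boot_error, 10, 8));
	printf("aid id:              %u\n", GET_FIELD(boot_error, 12, 11));
	printf("hbm id:              %u\n", GET_FIELD(boot_error, 13, 13));
	return 0;
}
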
+static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev,
+                                            u32 instance, u32 *boot_error)
+{
+       u32 reg_addr;
+       u32 reg_data;
+       int retry_loop;
+
+       reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+                  aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+       for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+               reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+               if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) {
+                       *boot_error = AMDGPU_RAS_BOOT_SUCCESS;
+                       return 0;
+               }
+               msleep(1);
+       }
+
+       /* The pattern for SMN addressing in other SOCs could differ from the
+        * one used for aqua_vanjaram. Revisit this code if that pattern
+        * changes; in that case, replace the aqua_vanjaram implementation
+        * with a more generic helper. */
+       reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
+                  aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+       for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+               reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+               if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) {
+                       *boot_error = reg_data;
+                       return 0;
+               }
+               msleep(1);
+       }
+
+       *boot_error = reg_data;
+       return -ETIME;
+}
+
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
+{
+       u32 boot_error = 0;
+       u32 i;
+
+       for (i = 0; i < num_instances; i++) {
+               if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
+                       amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
+       }
+}
index 76fb85628716f6302b3c02beb0965c85f2723a05..d10e5bb0e52f007cdbb380b44ae0b663d89544cd 100644 (file)
 #include "ta_ras_if.h"
 #include "amdgpu_ras_eeprom.h"
 #include "amdgpu_smuio.h"
+#include "amdgpu_aca.h"
 
 struct amdgpu_iv_entry;
 
+#define AMDGPU_RAS_GPU_ERR_MEM_TRAINING(x)             AMDGPU_GET_REG_FIELD(x, 0, 0)
+#define AMDGPU_RAS_GPU_ERR_FW_LOAD(x)                  AMDGPU_GET_REG_FIELD(x, 1, 1)
+#define AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(x)       AMDGPU_GET_REG_FIELD(x, 2, 2)
+#define AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(x)       AMDGPU_GET_REG_FIELD(x, 3, 3)
+#define AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(x)     AMDGPU_GET_REG_FIELD(x, 4, 4)
+#define AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(x)     AMDGPU_GET_REG_FIELD(x, 5, 5)
+#define AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(x)             AMDGPU_GET_REG_FIELD(x, 6, 6)
+#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x)            AMDGPU_GET_REG_FIELD(x, 7, 7)
+#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x)                        AMDGPU_GET_REG_FIELD(x, 10, 8)
+#define AMDGPU_RAS_GPU_ERR_AID_ID(x)                   AMDGPU_GET_REG_FIELD(x, 12, 11)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)                   AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x)              AMDGPU_GET_REG_FIELD(x, 31, 31)
+
+#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT   1000
+#define AMDGPU_RAS_BOOT_STEADY_STATUS          0xBA
+#define AMDGPU_RAS_BOOT_STATUS_MASK            0xFF
+#define AMDGPU_RAS_BOOT_SUCCESS                        0x80000000
+
 #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS          (0x1 << 0)
 /* position of instance value in sub_block_index of
  * ta_ras_trigger_error_input, the sub block uses lower 12 bits
@@ -39,6 +58,12 @@ struct amdgpu_iv_entry;
 #define AMDGPU_RAS_INST_MASK 0xfffff000
 #define AMDGPU_RAS_INST_SHIFT 0xc
 
+#define AMDGPU_RAS_FEATURES_SOCKETID_SHIFT 29
+#define AMDGPU_RAS_FEATURES_SOCKETID_MASK 0xe0000000
+
+/* The high three bits indicate the socket id */
+#define AMDGPU_RAS_GET_FEATURES(val)  ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
+
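A short sketch of the packing the comment above describes: the socket id lives in the top three bits and AMDGPU_RAS_GET_FEATURES() masks it off (the sample value is illustrative):

#include <stdio.h>

#define SOCKETID_SHIFT	29
#define SOCKETID_MASK	0xe0000000u

int main(void)
{
	unsigned val = (2u << SOCKETID_SHIFT) | 0x1fff;	/* hypothetical */
	unsigned features = val & ~SOCKETID_MASK;	/* AMDGPU_RAS_GET_FEATURES */
	unsigned socket_id = (val & SOCKETID_MASK) >> SOCKETID_SHIFT;

	printf("features=0x%x socket_id=%u\n", features, socket_id);
	return 0;
}
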
 enum amdgpu_ras_block {
        AMDGPU_RAS_BLOCK__UMC = 0,
        AMDGPU_RAS_BLOCK__SDMA,
@@ -57,6 +82,8 @@ enum amdgpu_ras_block {
        AMDGPU_RAS_BLOCK__MCA,
        AMDGPU_RAS_BLOCK__VCN,
        AMDGPU_RAS_BLOCK__JPEG,
+       AMDGPU_RAS_BLOCK__IH,
+       AMDGPU_RAS_BLOCK__MPIO,
 
        AMDGPU_RAS_BLOCK__LAST
 };
@@ -441,10 +468,15 @@ struct amdgpu_ras {
        /* Indicates whether SMU needs to update the bad channel info */
        bool update_channel_flag;
        /* Record status of smu aca debug mode */
-       bool is_mca_debug_mode;
+       bool is_aca_debug_mode;
 
        /* Record special requirements of gpu reset caller */
        uint32_t  gpu_reset_flags;
+
+       struct task_struct *page_retirement_thread;
+       wait_queue_head_t page_retirement_wq;
+       struct mutex page_retirement_lock;
+       atomic_t page_retirement_req_cnt;
 };
 
 struct ras_fs_data {
@@ -453,6 +485,7 @@ struct ras_fs_data {
 };
 
 struct ras_err_addr {
+       struct list_head node;
        uint64_t err_status;
        uint64_t err_ipid;
        uint64_t err_addr;
@@ -462,7 +495,8 @@ struct ras_err_info {
        struct amdgpu_smuio_mcm_config_info mcm_info;
        u64 ce_count;
        u64 ue_count;
-       struct ras_err_addr err_addr;
+       u64 de_count;
+       struct list_head err_addr_list;
 };
 
 struct ras_err_node {
@@ -473,6 +507,7 @@ struct ras_err_node {
 struct ras_err_data {
        unsigned long ue_count;
        unsigned long ce_count;
+       unsigned long de_count;
        unsigned long err_addr_cnt;
        struct eeprom_table_record *err_addr;
        u32 err_list_count;
@@ -529,6 +564,8 @@ struct ras_manager {
        struct ras_ih_data ih_data;
 
        struct ras_err_data err_data;
+
+       struct aca_handle aca_handle;
 };
 
 struct ras_badpage {
@@ -548,6 +585,7 @@ struct ras_query_if {
        struct ras_common_if head;
        unsigned long ue_count;
        unsigned long ce_count;
+       unsigned long de_count;
 };
 
 struct ras_inject_if {
@@ -781,7 +819,8 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);
 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
 
 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
-bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable);
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev);
 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
                                     unsigned int *mode);
 
@@ -818,5 +857,20 @@ int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
                struct amdgpu_smuio_mcm_config_info *mcm_info,
                struct ras_err_addr *err_addr, u64 count);
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+               struct amdgpu_smuio_mcm_config_info *mcm_info,
+               struct ras_err_addr *err_addr, u64 count);
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                              const struct aca_info *aca_info, void *data);
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk);
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+                                 struct aca_handle *handle, char *buf, void *data);
+
+void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info,
+                       struct ras_err_addr *err_addr);
 
+void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info,
+               struct ras_err_addr *mca_err_addr);
 #endif
index 2c3675d91614f13b21e35a08d57c5fc7bca32090..db5791e1a7cefbed9ef3ffaf438ba1c952ad3f7b 100644 (file)
@@ -241,7 +241,7 @@ void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
                        table_size = le32_to_cpu(hdr->jt_size);
                }
 
-               for (i = 0; i < table_size; i ++) {
+               for (i = 0; i < table_size; i++) {
                        dst_ptr[bo_offset + i] =
                                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
                }
index b591d33af26452aa58066cd0ea346ef3526305e5..5a17e0ff2ab892957e2cbf8070e25c8e66cf9848 100644 (file)
@@ -169,7 +169,7 @@ struct amdgpu_rlc_funcs {
        void (*stop)(struct amdgpu_device *adev);
        void (*reset)(struct amdgpu_device *adev);
        void (*start)(struct amdgpu_device *adev);
-       void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+       void (*update_spm_vmid)(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid);
        bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
 
index 7a6a67275404c805568f7f7a405cc77af778e5cb..e9081a98cf81ac766b25b7da9334e1a33f0e39a9 100644 (file)
  * counters and VM updates. It has a maximum of 32768 64-bit slots.
  */
 
+/**
+ * amdgpu_seq64_get_va_base - Get the seq64 va base address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * va base address on success
+ */
+static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
+{
+       u64 addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+       addr -= AMDGPU_VA_RESERVED_TOP;
+
+       return addr;
+}
+
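A compilable sketch of the arithmetic in amdgpu_seq64_get_va_base(): the seq64 block sits AMDGPU_VA_RESERVED_TOP bytes below the top of the VA space (max_pfn and the page shift below are example values, not taken from any particular ASIC):

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT	12		/* 4KB GPU pages */
#define VA_RESERVED_TOP	(4ULL << 20)	/* seq64 (2MB) + CSA (2MB) */

int main(void)
{
	uint64_t max_pfn = 1ULL << 36;	/* example: 48-bit VA space */
	uint64_t top = max_pfn << GPU_PAGE_SHIFT;
	uint64_t seq64_base = top - VA_RESERVED_TOP;

	printf("top of VA:  0x%llx\n", (unsigned long long)top);
	printf("seq64 base: 0x%llx\n", (unsigned long long)seq64_base);
	return 0;
}
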
 /**
  * amdgpu_seq64_map - Map the seq64 memory to VM
  *
  * @adev: amdgpu_device pointer
  * @vm: vm pointer
  * @bo_va: bo_va pointer
- * @seq64_addr: seq64 vaddr start address
- * @size: seq64 pool size
  *
  * Map the seq64 memory to the given VM.
  *
  * Returns:
  * 0 on success or a negative error code on failure
  */
 int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                    struct amdgpu_bo_va **bo_va, u64 seq64_addr,
-                    uint32_t size)
+                    struct amdgpu_bo_va **bo_va)
 {
        struct amdgpu_bo *bo;
        struct drm_exec exec;
+       u64 seq64_addr;
        int r;
 
        bo = adev->seq64.sbo;
@@ -77,9 +92,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                goto error;
        }
 
-       r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, size,
-                            AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
-                            AMDGPU_PTE_EXECUTABLE);
+       seq64_addr = amdgpu_seq64_get_va_base(adev);
+       r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
+                            AMDGPU_PTE_READABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
                amdgpu_vm_bo_del(adev, *bo_va);
@@ -144,31 +159,25 @@ error:
  * amdgpu_seq64_alloc - Allocate a 64-bit memory slot
  *
  * @adev: amdgpu_device pointer
- * @gpu_addr: allocated gpu VA start address
- * @cpu_addr: allocated cpu VA start address
+ * @va: VA used to access the seq64 slot in the process address space
+ * @cpu_addr: CPU address used to access the seq64 slot
  *
  * Allocate a 64-bit slot from the seq64 pool.
  *
  * Returns:
  * 0 on success or a negative error code on failure
  */
-int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
-                      u64 **cpu_addr)
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 **cpu_addr)
 {
        unsigned long bit_pos;
-       u32 offset;
 
        bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
+       if (bit_pos >= adev->seq64.num_sem)
+               return -ENOSPC;
 
-       if (bit_pos < adev->seq64.num_sem) {
-               __set_bit(bit_pos, adev->seq64.used);
-               offset = bit_pos << 6; /* convert to qw offset */
-       } else {
-               return -EINVAL;
-       }
-
-       *gpu_addr = offset + AMDGPU_SEQ64_VADDR_START;
-       *cpu_addr = offset + adev->seq64.cpu_base_addr;
+       __set_bit(bit_pos, adev->seq64.used);
+       *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+       *cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
 
        return 0;
 }
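
The alloc and free paths here are symmetric: a bitmap slot index maps to a GPU VA and a CPU pointer, and free recovers the index from the VA alone. A userspace sketch of that round trip (the base address is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cpu_base[64];			/* stands in for cpu_base_addr */
	uint64_t va_base = 0x7ffffc000000ULL;	/* made-up seq64 VA base */
	unsigned long bit_pos = 5;		/* slot taken from the bitmap */

	uint64_t va = va_base + bit_pos * sizeof(uint64_t);
	uint64_t *cpu = cpu_base + bit_pos;	/* cpu_base_addr + bit_pos */

	/* the free path recovers the slot index from the VA alone */
	unsigned long recovered = (va - va_base) / sizeof(uint64_t);

	printf("slot=%lu va=0x%llx recovered=%lu cpu=%p\n",
	       bit_pos, (unsigned long long)va, recovered, (void *)cpu);
	return 0;
}
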
@@ -177,20 +186,17 @@ int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr,
  * amdgpu_seq64_free - Free the given 64-bit slot
  *
  * @adev: amdgpu_device pointer
- * @gpu_addr: gpu start address to be freed
+ * @va: GPU VA of the slot to be freed
  *
  * Free the given 64-bit slot back to the seq64 pool.
- *
  */
-void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr)
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
 {
-       u32 offset;
-
-       offset = gpu_addr - AMDGPU_SEQ64_VADDR_START;
+       unsigned long bit_pos;
 
-       offset >>= 6;
-       if (offset < adev->seq64.num_sem)
-               __clear_bit(offset, adev->seq64.used);
+       bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
+       if (bit_pos < adev->seq64.num_sem)
+               __clear_bit(bit_pos, adev->seq64.used);
 }
 
 /**
@@ -229,7 +235,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
         * AMDGPU_MAX_SEQ64_SLOTS * sizeof(u64) * 8 = AMDGPU_MAX_SEQ64_SLOTS
         * 64bit slots
         */
-       r = amdgpu_bo_create_kernel(adev, AMDGPU_SEQ64_SIZE,
+       r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->seq64.sbo, NULL,
                                    (void **)&adev->seq64.cpu_base_addr);
@@ -238,7 +244,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev)
                return r;
        }
 
-       memset(adev->seq64.cpu_base_addr, 0, AMDGPU_SEQ64_SIZE);
+       memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);
 
        adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
        memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));
index 2196e72be508eeada66e2fc2297c98f223af891c..4203b2ab318df64a5b41959007b65e843c5d89ee 100644 (file)
 #ifndef __AMDGPU_SEQ64_H__
 #define __AMDGPU_SEQ64_H__
 
-#define AMDGPU_SEQ64_SIZE              (2ULL << 20)
-#define AMDGPU_MAX_SEQ64_SLOTS         (AMDGPU_SEQ64_SIZE / (sizeof(u64) * 8))
-#define AMDGPU_SEQ64_VADDR_OFFSET      0x50000
-#define AMDGPU_SEQ64_VADDR_START       (AMDGPU_VA_RESERVED_SIZE + AMDGPU_SEQ64_VADDR_OFFSET)
+#include "amdgpu_vm.h"
+
+#define AMDGPU_MAX_SEQ64_SLOTS         (AMDGPU_VA_RESERVED_SEQ64_SIZE / sizeof(u64))
 
 struct amdgpu_seq64 {
        struct amdgpu_bo *sbo;
@@ -42,7 +41,7 @@ int amdgpu_seq64_init(struct amdgpu_device *adev);
 int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *gpu_addr, u64 **cpu_addr);
 void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
 int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                    struct amdgpu_bo_va **bo_va, u64 seq64_addr, uint32_t size);
+                    struct amdgpu_bo_va **bo_va);
 void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
 
 #endif
index d65e21914d8c4a9e8f7d03a59391504e08a46f63..20436f81856ad280f112bf52dd42ea6157443b04 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "amdgpu.h"
 #include "umc_v6_7.h"
+#define MAX_UMC_POISON_POLLING_TIME_SYNC   20  /* ms */
 
 static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
                                    struct ras_err_data *err_data, uint64_t err_addr,
@@ -85,18 +86,21 @@ out_fini_err_data:
        return ret;
 }
 
-static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
-               void *ras_error_status,
-               struct amdgpu_iv_entry *entry,
-               bool reset)
+static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+                       void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       unsigned int error_query_mode;
        int ret = 0;
+       unsigned long err_count;
 
-       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+       if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
+               return;
+
+       mutex_lock(&con->page_retirement_lock);
        ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
-       if (ret == -EOPNOTSUPP) {
+       if (ret == -EOPNOTSUPP &&
+           error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
@@ -120,7 +124,8 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                         */
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
                }
-       } else if (!ret) {
+       } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+           (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
                if (adev->umc.ras &&
                    adev->umc.ras->ecc_info_query_ras_error_count)
                    adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
@@ -147,16 +152,13 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
        }
 
        /* only uncorrectable error needs gpu reset */
-       if (err_data->ue_count) {
-               dev_info(adev->dev, "%ld uncorrectable hardware errors "
-                               "detected in UMC block\n",
-                               err_data->ue_count);
-
+       if (err_data->ue_count || err_data->de_count) {
+               err_count = err_data->ue_count + err_data->de_count;
                if ((amdgpu_bad_page_threshold != 0) &&
                        err_data->err_addr_cnt) {
                        amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
                                                err_data->err_addr_cnt);
-                       amdgpu_ras_save_bad_pages(adev, &(err_data->ue_count));
+                       amdgpu_ras_save_bad_pages(adev, &err_count);
 
                        amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
 
@@ -165,20 +167,87 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                                con->update_channel_flag = false;
                        }
                }
-
-               if (reset) {
-                       /* use mode-2 reset for poison consumption */
-                       if (!entry)
-                               con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
-                       amdgpu_ras_reset_gpu(adev);
-               }
        }
 
        kfree(err_data->err_addr);
+
+       mutex_unlock(&con->page_retirement_lock);
+}
+
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+               void *ras_error_status,
+               struct amdgpu_iv_entry *entry,
+               bool reset)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+       amdgpu_umc_handle_bad_pages(adev, ras_error_status);
+
+       if (err_data->ue_count && reset) {
+               /* use mode-2 reset for poison consumption */
+               if (!entry)
+                       con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+               amdgpu_ras_reset_gpu(adev);
+       }
+
        return AMDGPU_RAS_SUCCESS;
 }
 
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
+int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
+                       bool reset, uint32_t timeout_ms)
+{
+       struct ras_err_data err_data;
+       struct ras_common_if head = {
+               .block = AMDGPU_RAS_BLOCK__UMC,
+       };
+       struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+       uint32_t timeout = timeout_ms;
+
+       memset(&err_data, 0, sizeof(err_data));
+       amdgpu_ras_error_data_init(&err_data);
+
+       do {
+               amdgpu_umc_handle_bad_pages(adev, &err_data);
+
+               if (timeout && !err_data.de_count) {
+                       msleep(1);
+                       timeout--;
+               }
+       } while (timeout && !err_data.de_count);
+
+       if (!timeout)
+               dev_warn(adev->dev, "Can't find bad pages\n");
+
+       if (err_data.de_count)
+               dev_info(adev->dev, "%ld new deferred hardware errors detected\n", err_data.de_count);
+
+       if (obj) {
+               obj->err_data.ue_count += err_data.ue_count;
+               obj->err_data.ce_count += err_data.ce_count;
+               obj->err_data.de_count += err_data.de_count;
+       }
+
+       amdgpu_ras_error_data_fini(&err_data);
+
+       kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+       if (reset) {
+               struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+               /* use mode-2 reset for poison consumption */
+               con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+               amdgpu_ras_reset_gpu(adev);
+       }
+
+       return 0;
+}
+
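A minimal sketch of the polling loop amdgpu_umc_bad_page_polling_timeout() implements, with the hardware query replaced by a stub so it runs standalone (query_new_deferred_errors() is invented for illustration):

#include <stdio.h>

/* stub for amdgpu_umc_handle_bad_pages(): pretend deferred
 * errors become visible on the third poll */
static unsigned long query_new_deferred_errors(void)
{
	static int calls;
	return ++calls >= 3;
}

int main(void)
{
	unsigned int timeout = 20;	/* MAX_UMC_POISON_POLLING_TIME_SYNC, ms */
	unsigned long de_count;

	do {
		de_count = query_new_deferred_errors();
		if (timeout && !de_count)
			timeout--;	/* the driver msleep(1)s here */
	} while (timeout && !de_count);

	if (!de_count)
		printf("Can't find bad pages\n");
	else
		printf("%lu new deferred hardware errors detected\n", de_count);
	return 0;
}
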
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+                       enum amdgpu_ras_block block, bool reset)
 {
        int ret = AMDGPU_RAS_SUCCESS;
 
@@ -195,27 +264,41 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
        }
 
        if (!amdgpu_sriov_vf(adev)) {
-               struct ras_err_data err_data;
-               struct ras_common_if head = {
-                       .block = AMDGPU_RAS_BLOCK__UMC,
-               };
-               struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+               if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+                       struct ras_err_data err_data;
+                       struct ras_common_if head = {
+                               .block = AMDGPU_RAS_BLOCK__UMC,
+                       };
+                       struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+                       ret = amdgpu_ras_error_data_init(&err_data);
+                       if (ret)
+                               return ret;
 
-               ret = amdgpu_ras_error_data_init(&err_data);
-               if (ret)
-                       return ret;
+                       ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
 
-               ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+                       if (ret == AMDGPU_RAS_SUCCESS && obj) {
+                               obj->err_data.ue_count += err_data.ue_count;
+                               obj->err_data.ce_count += err_data.ce_count;
+                               obj->err_data.de_count += err_data.de_count;
+                       }
 
-               if (ret == AMDGPU_RAS_SUCCESS && obj) {
-                       obj->err_data.ue_count += err_data.ue_count;
-                       obj->err_data.ce_count += err_data.ce_count;
-               }
+                       amdgpu_ras_error_data_fini(&err_data);
+               } else {
+                       if (reset) {
+                               amdgpu_umc_bad_page_polling_timeout(adev,
+                                                       reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
+                       } else {
+                               struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-               amdgpu_ras_error_data_fini(&err_data);
+                               atomic_inc(&con->page_retirement_req_cnt);
+
+                               wake_up(&con->page_retirement_wq);
+                       }
+               }
        } else {
                if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
-                       adev->virt.ops->ras_poison_handler(adev);
+                       adev->virt.ops->ras_poison_handler(adev, block);
                else
                        dev_warn(adev->dev,
                                "No ras_poison_handler interface in SRIOV!\n");
index 417a6726c71b479943cf1615040304f6c9f79551..26d2ae498daf22bf2833cfdde9333c5f26523b44 100644 (file)
@@ -21,7 +21,7 @@
 #ifndef __AMDGPU_UMC_H__
 #define __AMDGPU_UMC_H__
 #include "amdgpu_ras.h"
-
+#include "amdgpu_mca.h"
 /*
  * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
  * is the index of 4KB block
@@ -64,6 +64,8 @@ struct amdgpu_umc_ras {
                                      void *ras_error_status);
        void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
                                        void *ras_error_status);
+       bool (*check_ecc_err_status)(struct amdgpu_device *adev,
+                       enum amdgpu_mca_error_type type, void *ras_error_status);
        /* support different eeprom table version for different asic */
        void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr);
 };
@@ -100,7 +102,8 @@ struct amdgpu_umc {
 
 int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
-int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+                       enum amdgpu_ras_block block, bool reset);
 int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
                struct amdgpu_irq_src *source,
                struct amdgpu_iv_entry *entry);
@@ -118,4 +121,7 @@ int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
 
 int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
                        umc_func func, void *data);
+
+int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
+                       bool reset, uint32_t timeout_ms);
 #endif
index 107f9bb0e24f7f2f8e810084cab0e43e8da9f020..5b27fc41ffbf27396ce1ea3f3827fde1cae70c37 100644 (file)
@@ -69,12 +69,12 @@ struct amdgpu_debugfs_gprwave_data {
 };
 
 enum AMDGPU_DEBUGFS_REGS2_CMDS {
-       AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0,
+       AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
        AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2,
 };
 
 enum AMDGPU_DEBUGFS_GPRWAVE_CMDS {
-       AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE=0,
+       AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE = 0,
 };
 
 //reg2 interface
index bfbf59326ee12d5bc5a7dd3def0a77e74971e008..ab820cf526683b5a15ef94ee28075f20752baeac 100644 (file)
@@ -358,7 +358,7 @@ static int setup_umsch_mm_test(struct amdgpu_device *adev,
 
        memset(test->ring_data_cpu_addr, 0, sizeof(struct umsch_mm_test_ring_data));
 
-       test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
+       test->ring_data_gpu_addr = AMDGPU_VA_RESERVED_BOTTOM;
        r = map_ring_data(adev, test->vm, test->ring_data_obj, &test->bo_va,
                          test->ring_data_gpu_addr, sizeof(struct umsch_mm_test_ring_data));
        if (r)
index f4963330c772a9c717dae129e6ccf2ec1c4d3ef0..f300d4a4457d39ed613977ec620866efc10f4972 100644 (file)
@@ -1189,7 +1189,7 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
                amdgpu_ras_interrupt_dispatch(adev, &ih_data);
        } else {
                if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
-                       adev->virt.ops->ras_poison_handler(adev);
+                       adev->virt.ops->ras_poison_handler(adev, ras_if->block);
                else
                        dev_warn(adev->dev,
                                "No ras_poison_handler interface in SRIOV for VCN!\n");
index 0dcff2889e25d2b8883a39eaf9d09755bb1c2373..6ff7d3fb2008038d7419ce2105759a7a96a24921 100644 (file)
@@ -71,59 +71,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
                amdgpu_num_kcq = 2;
 }
 
-void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-                                       uint32_t reg0, uint32_t reg1,
-                                       uint32_t ref, uint32_t mask,
-                                       uint32_t xcc_inst)
-{
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
-       struct amdgpu_ring *ring = &kiq->ring;
-       signed long r, cnt = 0;
-       unsigned long flags;
-       uint32_t seq;
-
-       if (adev->mes.ring.sched.ready) {
-               amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
-                                             ref, mask);
-               return;
-       }
-
-       spin_lock_irqsave(&kiq->ring_lock, flags);
-       amdgpu_ring_alloc(ring, 32);
-       amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
-                                           ref, mask);
-       r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
-       if (r)
-               goto failed_undo;
-
-       amdgpu_ring_commit(ring);
-       spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-       /* don't wait anymore for IRQ context */
-       if (r < 1 && in_interrupt())
-               goto failed_kiq;
-
-       might_sleep();
-       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-
-               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-       }
-
-       if (cnt > MAX_KIQ_REG_TRY)
-               goto failed_kiq;
-
-       return;
-
-failed_undo:
-       amdgpu_ring_undo(ring);
-       spin_unlock_irqrestore(&kiq->ring_lock, flags);
-failed_kiq:
-       dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
-}
-
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @adev:      amdgpu device.
@@ -303,11 +250,11 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
        if (!*data)
                goto data_failure;
 
-       bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
+       bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
        if (!bps)
                goto bps_failure;
 
-       bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
+       bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
        if (!bps_bo)
                goto bps_bo_failure;
 
@@ -340,8 +287,10 @@ static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
 
        for (i = data->last_reserved - 1; i >= 0; i--) {
                bo = data->bps_bo[i];
-               amdgpu_bo_free_kernel(&bo, NULL, NULL);
-               data->bps_bo[i] = bo;
+               if (bo) {
+                       amdgpu_bo_free_kernel(&bo, NULL, NULL);
+                       data->bps_bo[i] = bo;
+               }
                data->last_reserved = i;
        }
 }
@@ -381,6 +330,8 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
 {
        struct amdgpu_virt *virt = &adev->virt;
        struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+       struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+       struct ttm_resource_manager *man = &mgr->manager;
        struct amdgpu_bo *bo = NULL;
        uint64_t bp;
        int i;
@@ -396,12 +347,18 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
                 * 2) a ras bad page has been reserved (duplicate error injection
                 *    for one page);
                 */
-               if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
-                                              AMDGPU_GPU_PAGE_SIZE,
-                                              &bo, NULL))
-                       DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
-
-               data->bps_bo[i] = bo;
+               if (ttm_resource_manager_used(man)) {
+                       amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
+                               bp << AMDGPU_GPU_PAGE_SHIFT,
+                               AMDGPU_GPU_PAGE_SIZE);
+                       data->bps_bo[i] = NULL;
+               } else {
+                       if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+                                                       AMDGPU_GPU_PAGE_SIZE,
+                                                       &bo, NULL))
+                               DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+                       data->bps_bo[i] = bo;
+               }
                data->last_reserved = i + 1;
                bo = NULL;
        }
index d4207e44141f185bbcd28e28d638b084fda09ec9..fa7be5f277b957b2e8fa9dd9ebef2c543991aa41 100644 (file)
@@ -88,7 +88,8 @@ struct amdgpu_virt_ops {
        int (*wait_reset)(struct amdgpu_device *adev);
        void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
                          u32 data1, u32 data2, u32 data3);
-       void (*ras_poison_handler)(struct amdgpu_device *adev);
+       void (*ras_poison_handler)(struct amdgpu_device *adev,
+                                       enum amdgpu_ras_block block);
 };
 
 /*
@@ -332,10 +333,6 @@ static inline bool is_virtual_machine(void)
        ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-                                       uint32_t reg0, uint32_t rreg1,
-                                       uint32_t ref, uint32_t mask,
-                                       uint32_t xcc_inst);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
index 453a4b786cfcc15c8ac26dd4f9d49a254fbc6228..8baa2e0935cc6e6c3ca6dd0fab54914932514dc9 100644 (file)
@@ -660,8 +660,7 @@ static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
        .set_powergating_state = amdgpu_vkms_set_powergating_state,
 };
 
-const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
-{
+const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 1,
        .minor = 0,
index b8fcb6c55698934549c6696a337e749f0e268217..ed4a8c5d26d7993b6bb342e35501ad450c39b7c7 100644 (file)
@@ -233,6 +233,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
        spin_unlock(&vm_bo->vm->status_lock);
 }
 
+/**
+ * amdgpu_vm_bo_evicted_user - vm_bo is evicted
+ *
+ * @vm_bo: vm_bo which is evicted
+ *
+ * State for BOs used by user mode queues which are not at the location they
+ * should be.
+ */
+static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
+{
+       vm_bo->moved = true;
+       spin_lock(&vm_bo->vm->status_lock);
+       list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
+       spin_unlock(&vm_bo->vm->status_lock);
+}
+
 /**
  * amdgpu_vm_bo_relocated - vm_bo is relocated
  *
@@ -427,21 +443,25 @@ uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 }
 
 /**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
+ * amdgpu_vm_validate - validate evicted BOs tracked in the VM
  *
  * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
+ * @ticket: optional reservation ticket used to reserve the VM
  * @validate: callback to do the validation
  * @param: parameter for the validation callback
  *
- * Validate the page table BOs on command submission if neccessary.
+ * Validate the page table BOs and per-VM BOs on command submission if
+ * necessary. If a ticket is given, also try to validate evicted user queue
+ * BOs. They must already be reserved with the given ticket.
  *
  * Returns:
  * Validation result.
  */
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                             int (*validate)(void *p, struct amdgpu_bo *bo),
-                             void *param)
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                      struct ww_acquire_ctx *ticket,
+                      int (*validate)(void *p, struct amdgpu_bo *bo),
+                      void *param)
 {
        struct amdgpu_vm_bo_base *bo_base;
        struct amdgpu_bo *shadow;
@@ -484,6 +504,28 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                }
                spin_lock(&vm->status_lock);
        }
+       while (ticket && !list_empty(&vm->evicted_user)) {
+               bo_base = list_first_entry(&vm->evicted_user,
+                                          struct amdgpu_vm_bo_base,
+                                          vm_status);
+               spin_unlock(&vm->status_lock);
+
+               bo = bo_base->bo;
+
+               if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
+                       pr_warn_ratelimited("Evicted user BO is not reserved in pid %d\n",
+                                           vm->task_info.pid);
+                       return -EINVAL;
+               }
+
+               r = validate(param, bo);
+               if (r)
+                       return r;
+
+               amdgpu_vm_bo_invalidated(bo_base);
+
+               spin_lock(&vm->status_lock);
+       }
        spin_unlock(&vm->status_lock);
 
        amdgpu_vm_eviction_lock(vm);
@@ -651,7 +693,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
        if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
 
        if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
            gds_switch_needed) {
@@ -1426,11 +1468,21 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                }
 
                r = amdgpu_vm_bo_update(adev, bo_va, clear);
-               if (r)
-                       return r;
 
                if (unlock)
                        dma_resv_unlock(resv);
+               if (r)
+                       return r;
+
+               /* Remember evicted DMABuf imports in compute VMs for later
+                * validation
+                */
+               if (vm->is_compute_context &&
+                   bo_va->base.bo->tbo.base.import_attach &&
+                   (!bo_va->base.bo->tbo.resource ||
+                    bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
+                       amdgpu_vm_bo_evicted_user(&bo_va->base);
+
                spin_lock(&vm->status_lock);
        }
        spin_unlock(&vm->status_lock);
@@ -2196,6 +2248,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
                vm->reserved_vmid[i] = NULL;
        INIT_LIST_HEAD(&vm->evicted);
+       INIT_LIST_HEAD(&vm->evicted_user);
        INIT_LIST_HEAD(&vm->relocated);
        INIT_LIST_HEAD(&vm->moved);
        INIT_LIST_HEAD(&vm->idle);
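
The evicted_user path in amdgpu_vm_validate() above only processes BOs already reserved with the caller's ticket. A toy model of that check, with plain pointers standing in for dma_resv locking contexts (none of these types are driver API):

#include <stdio.h>

struct ticket { int id; };
struct bo { struct ticket *locked_by; };	/* stand-in for the resv lock */

/* mirrors: dma_resv_locking_ctx(bo->tbo.base.resv) != ticket -> -EINVAL */
static int validate_user_bo(struct bo *bo, struct ticket *caller)
{
	if (bo->locked_by != caller) {
		fprintf(stderr, "Evicted user BO is not reserved\n");
		return -1;
	}
	return 0;	/* the real path calls validate(param, bo) here */
}

int main(void)
{
	struct ticket mine = { 1 }, other = { 2 };
	struct bo ok = { &mine }, stale = { &other };

	printf("reserved by caller:        %d\n", validate_user_bo(&ok, &mine));
	printf("reserved by someone else:  %d\n", validate_user_bo(&stale, &mine));
	return 0;
}
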
index 4740dd65b99d6ccc107e5d63aba0f0d67d02d718..666698a571927a1584c6655c8d8d0a12bef94e30 100644 (file)
@@ -136,7 +136,11 @@ struct amdgpu_mem_stats;
 #define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
 
 /* Reserve regions at the top/bottom of the address space for kernel use */
-#define AMDGPU_VA_RESERVED_SIZE                        (2ULL << 20)
+#define AMDGPU_VA_RESERVED_CSA_SIZE            (2ULL << 20)
+#define AMDGPU_VA_RESERVED_SEQ64_SIZE          (2ULL << 20)
+#define AMDGPU_VA_RESERVED_BOTTOM              (2ULL << 20)
+#define AMDGPU_VA_RESERVED_TOP                 (AMDGPU_VA_RESERVED_SEQ64_SIZE + \
+                                                AMDGPU_VA_RESERVED_CSA_SIZE)
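A small sketch of the address-space layout these macros imply: 2MB reserved at the bottom and a 4MB region (seq64 plus CSA) reserved at the top (the top-of-VA value below is a placeholder):

#include <stdint.h>
#include <stdio.h>

#define RESERVED_CSA	(2ULL << 20)
#define RESERVED_SEQ64	(2ULL << 20)
#define RESERVED_BOTTOM	(2ULL << 20)
#define RESERVED_TOP	(RESERVED_SEQ64 + RESERVED_CSA)

int main(void)
{
	uint64_t va_top = 1ULL << 48;	/* placeholder end of VA space */

	printf("usable VA: [0x%llx, 0x%llx)\n",
	       (unsigned long long)RESERVED_BOTTOM,
	       (unsigned long long)(va_top - RESERVED_TOP));
	printf("reserved at top: %llu MB\n",
	       (unsigned long long)(RESERVED_TOP >> 20));
	return 0;
}
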
 
 /* See vm_update_mode */
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
@@ -288,9 +292,12 @@ struct amdgpu_vm {
        /* Lock to protect vm_bo add/del/move on all lists of vm */
        spinlock_t              status_lock;
 
-       /* BOs who needs a validation */
+       /* Per-VM and PT BOs who needs a validation */
        struct list_head        evicted;
 
+       /* BOs for user mode queues that need a validation */
+       struct list_head        evicted_user;
+
        /* PT BOs which relocated and their parent need an update */
        struct list_head        relocated;
 
@@ -434,9 +441,10 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
                      unsigned int num_fences);
 bool amdgpu_vm_ready(struct amdgpu_vm *vm);
 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                             int (*callback)(void *p, struct amdgpu_bo *bo),
-                             void *param);
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                      struct ww_acquire_ctx *ticket,
+                      int (*callback)(void *p, struct amdgpu_bo *bo),
+                      void *param);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm, bool immediate);
index a6c88f2fe6e5750ea42d153dcfb855046d1a5b25..20d51f6c9bb8ce06be2d79bae3e3959bd9eeb12c 100644 (file)
@@ -1035,15 +1035,74 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
        return 0;
 }
 
+static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+                                               struct aca_bank_report *report, void *data)
+{
+       struct amdgpu_device *adev = handle->adev;
+       const char *error_str;
+       u64 status;
+       int ret, ext_error_code;
+
+       ret = aca_bank_info_decode(bank, &report->info);
+       if (ret)
+               return ret;
+
+       status = bank->regs[ACA_REG_IDX_STATUS];
+       ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+
+       error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
+               xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
+       if (error_str)
+               dev_info(adev->dev, "%s detected\n", error_str);
+
+       if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) ||
+           (type == ACA_ERROR_TYPE_CE && ext_error_code == 6))
+               report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
+
+       return 0;
+}
+
+static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
+       .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report,
+};
+
+static const struct aca_info xgmi_v6_4_0_aca_info = {
+       .hwip = ACA_HWIP_TYPE_PCS_XGMI,
+       .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+       .bank_ops = &xgmi_v6_4_0_aca_bank_ops,
+};
+
 static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
+       int r;
+
        if (!adev->gmc.xgmi.supported ||
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
        amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
 
-       return amdgpu_ras_block_late_init(adev, ras_block);
+       r = amdgpu_ras_block_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+       case IP_VERSION(6, 4, 0):
+               r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
+                                       &xgmi_v6_4_0_aca_info, NULL);
+               if (r)
+                       goto late_fini;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+
+late_fini:
+       amdgpu_ras_block_late_fini(adev, ras_block);
+
+       return r;
 }
 
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
@@ -1099,7 +1158,7 @@ static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
 
 static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
 {
-       WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+       WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
 }
 
 static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
@@ -1277,12 +1336,12 @@ static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
        err_data->ce_count += ce_cnt;
 }
 
-static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
+static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
 {
        const char *error_str;
        int ext_error_code;
 
-       ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status);
+       ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
 
        error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
                xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
@@ -1291,9 +1350,9 @@ static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdg
 
        switch (ext_error_code) {
        case 0:
-               return AMDGPU_MCA_ERROR_TYPE_UE;
+               return ACA_ERROR_TYPE_UE;
        case 6:
-               return AMDGPU_MCA_ERROR_TYPE_CE;
+               return ACA_ERROR_TYPE_CE;
        default:
                return -EINVAL;
        }
@@ -1307,22 +1366,22 @@ static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct a
        int xgmi_inst = mcm_info->die_id;
        u64 status = 0;
 
-       status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS);
-       if (!MCA_REG__STATUS__VAL(status))
+       status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
+       if (!ACA_REG__STATUS__VAL(status))
                return;
 
        switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
-       case AMDGPU_MCA_ERROR_TYPE_UE:
+       case ACA_ERROR_TYPE_UE:
                amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, NULL, 1ULL);
                break;
-       case AMDGPU_MCA_ERROR_TYPE_CE:
+       case ACA_ERROR_TYPE_CE:
                amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, NULL, 1ULL);
                break;
        default:
                break;
        }
 
-       WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+       WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
 }
 
 static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
index a33e890c70d904a2be4f0f33cf9a194e053b70be..b888613f653f6ce34fb8c36ccc0acbabad06213e 100644 (file)
@@ -62,6 +62,7 @@
 typedef struct {
        struct atom_context *ctx;
        uint32_t *ps, *ws;
+       int ps_size, ws_size;
        int ps_shift;
        uint16_t start;
        unsigned last_jump;
@@ -70,8 +71,8 @@ typedef struct {
 } atom_exec_context;
 
 int amdgpu_atom_debug;
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
 
 static uint32_t atom_arg_mask[8] =
        { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -223,7 +224,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
                (*ptr)++;
                /* get_unaligned_le32 avoids unaligned accesses from atombios
                 * tables, noticed on a DEC Alpha. */
-               val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+               if (idx < ctx->ps_size)
+                       val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+               else
+                       pr_info("PS index out of range: %i >= %i\n", idx, ctx->ps_size);
                if (print)
                        DEBUG("PS[0x%02X,0x%04X]", idx, val);
                break;
@@ -261,7 +265,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
                        val = gctx->reg_block;
                        break;
                default:
-                       val = ctx->ws[idx];
+                       if (idx < ctx->ws_size)
+                               val = ctx->ws[idx];
+                       else
+                               pr_info("WS index out of range: %i >= %i\n", idx, ctx->ws_size);
                }
                break;
        case ATOM_ARG_ID:
@@ -495,6 +502,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
                idx = U8(*ptr);
                (*ptr)++;
                DEBUG("PS[0x%02X]", idx);
+               if (idx >= ctx->ps_size) {
+                       pr_info("PS index out of range: %i >= %i\n", idx, ctx->ps_size);
+                       return;
+               }
                ctx->ps[idx] = cpu_to_le32(val);
                break;
        case ATOM_ARG_WS:
@@ -527,6 +538,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
                        gctx->reg_block = val;
                        break;
                default:
+                       if (idx >= ctx->ws_size) {
+                               pr_info("WS index out of range: %i >= %i\n", idx, ctx->ws_size);
+                               return;
+                       }
                        ctx->ws[idx] = val;
                }
                break;
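
The PS/WS stores above now share the same guard: reject the write and return rather than scribble past the array. The pattern in isolation, as a runnable sketch (put_ws() is illustrative):

#include <stdint.h>
#include <stdio.h>

static void put_ws(uint32_t *ws, int ws_size, int idx, uint32_t val)
{
	if (idx >= ws_size) {
		printf("WS index out of range: %i >= %i\n", idx, ws_size);
		return;		/* drop the write instead of corrupting memory */
	}
	ws[idx] = val;
}

int main(void)
{
	uint32_t ws[4] = { 0 };

	put_ws(ws, 4, 2, 0xabcd);	/* in range, stored */
	put_ws(ws, 4, 7, 0xffff);	/* out of range, rejected */
	printf("ws[2]=0x%x\n", (unsigned)ws[2]);
	return 0;
}
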
@@ -624,7 +639,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
        else
                SDEBUG("   table: %d\n", idx);
        if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
-               r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+               r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
        if (r) {
                ctx->abort = true;
        }
@@ -1203,7 +1218,7 @@ static struct {
        atom_op_div32, ATOM_ARG_WS},
 };
 
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
 {
        int base = CU16(ctx->cmd_table + 4 + 2 * index);
        int len, ws, ps, ptr;
@@ -1225,12 +1240,16 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
        ectx.ps_shift = ps / 4;
        ectx.start = base;
        ectx.ps = params;
+       ectx.ps_size = params_size;
        ectx.abort = false;
        ectx.last_jump = 0;
-       if (ws)
+       if (ws) {
                ectx.ws = kcalloc(4, ws, GFP_KERNEL);
-       else
+               ectx.ws_size = ws;
+       } else {
                ectx.ws = NULL;
+               ectx.ws_size = 0;
+       }
 
        debug_depth++;
        while (1) {
@@ -1264,7 +1283,7 @@ free:
        return ret;
 }
 
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
 {
        int r;
 
@@ -1280,7 +1299,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *par
        /* reset divmul */
        ctx->divmul[0] = 0;
        ctx->divmul[1] = 0;
-       r = amdgpu_atom_execute_table_locked(ctx, index, params);
+       r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
        mutex_unlock(&ctx->mutex);
        return r;
 }
@@ -1552,7 +1571,7 @@ int amdgpu_atom_asic_init(struct atom_context *ctx)
 
        if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
                return 1;
-       ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+       ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
        if (ret)
                return ret;
 
index c11cf18a0f182fb49203262fe524f8094c5c8efd..b807f6639a4c67c5282e5217472c6c4add7079d4 100644 (file)
@@ -156,7 +156,7 @@ struct atom_context {
 extern int amdgpu_atom_debug;
 
 struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
 int amdgpu_atom_asic_init(struct atom_context *ctx);
 void amdgpu_atom_destroy(struct atom_context *ctx);
 bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
index 10098fdd33fc4723efab3448fb4b5044d72c8788..3dfc28840a7d343b865dd3f6fb93947c15cd5e7d 100644 (file)
@@ -77,7 +77,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
                args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
                break;
        }
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
@@ -106,7 +106,7 @@ void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
                args.ucEnable = ATOM_SCALER_DISABLE;
                break;
        }
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
@@ -123,7 +123,7 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
        args.ucCRTC = amdgpu_crtc->crtc_id;
        args.ucEnable = lock;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
@@ -139,7 +139,7 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
        args.ucCRTC = amdgpu_crtc->crtc_id;
        args.ucEnable = state;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
@@ -155,7 +155,7 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
        args.ucCRTC = amdgpu_crtc->crtc_id;
        args.ucBlanking = state;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
@@ -171,7 +171,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
        args.ucDispPipeId = amdgpu_crtc->crtc_id;
        args.ucEnable = state;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
@@ -183,7 +183,7 @@ void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
 
        args.ucEnable = ATOM_INIT;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
@@ -228,7 +228,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
        args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
        args.ucCRTC = amdgpu_crtc->crtc_id;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 union atom_enable_ss {
@@ -293,7 +293,7 @@ static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
        args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
        args.v3.ucEnable = enable;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 union adjust_pixel_clock {
@@ -395,7 +395,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
                                        ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
                        amdgpu_atom_execute_table(adev->mode_info.atom_context,
-                                          index, (uint32_t *)&args);
+                                          index, (uint32_t *)&args, sizeof(args));
                        adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
                        break;
                case 3:
@@ -428,7 +428,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
                                args.v3.sInput.ucExtTransmitterID = 0;
 
                        amdgpu_atom_execute_table(adev->mode_info.atom_context,
-                                          index, (uint32_t *)&args);
+                                          index, (uint32_t *)&args, sizeof(args));
                        adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
                        if (args.v3.sOutput.ucRefDiv) {
                                amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
@@ -514,7 +514,7 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
                DRM_ERROR("Unknown table version %d %d\n", frev, crev);
                return;
        }
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 union set_dce_clock {
@@ -544,7 +544,7 @@ u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
                        args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
                        args.v2_1.asParam.ucDCEClkType = clk_type;
                        args.v2_1.asParam.ucDCEClkSrc = clk_src;
-                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+                       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                        ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
                        break;
                default:
@@ -740,7 +740,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
                return;
        }
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
index 87c41e0e9b7c24794a4f502ee1a54f7eef324373..622634c08c7b56dce527b37c2d8b479abc58420c 100644 (file)
@@ -83,7 +83,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
        args.v2.ucDelay = delay / 10;
        args.v2.ucHPD_ID = chan->rec.hpd;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        *ack = args.v2.ucReplyStatus;
 
@@ -301,7 +301,7 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
        args.ucLaneNum = lane_num;
        args.ucStatus = 0;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
        return args.ucStatus;
 }
 
index 7672abe6c140c03946e5aeda86c70990d6af92d8..25feab188dfe69168f704914589bb65da65445fd 100644 (file)
@@ -335,7 +335,7 @@ amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
        args.ucDacStandard = ATOM_DAC1_PS2;
        args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
 }
 
@@ -432,7 +432,7 @@ amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
                break;
        }
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
@@ -732,7 +732,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
                break;
        }
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
 }
 
@@ -1136,7 +1136,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
                break;
        }
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 bool
@@ -1164,7 +1164,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
 
        args.v1.ucAction = action;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        /* wait for the panel to power up */
        if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1288,7 +1288,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
                DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
                return;
        }
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void
@@ -1633,7 +1633,7 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
                return;
        }
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 /* This only needs to be called once at startup */
@@ -1706,7 +1706,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
                                args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
                }
 
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                return true;
        } else
index af0335535f8277fb9ff1763b8c34a109b6e313bc..a6501114322fd43db12c562526b47a6f6cb23ef8 100644 (file)
@@ -86,7 +86,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
        args.ucSlaveAddr = slave_addr << 1;
        args.ucLineNumber = chan->rec.i2c_id;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        /* error */
        if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -172,5 +172,5 @@ void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr
        args.ucSlaveAddr = slave_addr;
        args.ucLineNumber = line_number;
 
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
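
Every execute-table call site in the display code now reports the size of its
argument buffer. The args variables are local unions sized for the largest
supported table revision, so sizeof(args) is the natural bound;
amdgpu_atom_asic_init above instead passes a fixed 16 for its scratch
parameter array. The resulting calling convention, with a hypothetical
parameter union standing in for the per-table types used above:

	union hypothetical_args args;	/* placeholder for any table's union */
	int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);

	memset(&args, 0, sizeof(args));
	/* ... fill in the revision-specific fields ... */
	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args, sizeof(args));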
index 567a904804bc694caed08e3072128078fb4cee4f..9c85ca6358c17e44df7b6b69208fc6ff09c8157b 100644 (file)
@@ -21,8 +21,7 @@
  *
  */
 
-static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_1[] = {
     0x00000000, // DB_RENDER_CONTROL
     0x00000000, // DB_COUNT_CONTROL
     0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
     0x00000000, // PA_SC_VPORT_ZMIN_15
     0x3f800000, // PA_SC_VPORT_ZMAX_15
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_2[] = {
     0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
     0x00000000, // PA_SC_TILE_STEERING_OVERRIDE
     0x00000000, // CP_PERFMON_CNTX_CNTL
@@ -521,15 +519,13 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
     0x00000000, // CB_MRT6_EPITCH
     0x00000000, // CB_MRT7_EPITCH
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_3[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_3[] = {
     0x00000000, // PA_CL_POINT_X_RAD
     0x00000000, // PA_CL_POINT_Y_RAD
     0x00000000, // PA_CL_POINT_SIZE
     0x00000000, // PA_CL_POINT_CULL_RAD
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_4[] = {
     0x00000000, // DB_DEPTH_CONTROL
     0x00000000, // DB_EQAA
     0x00000000, // CB_COLOR_CONTROL
@@ -688,17 +684,14 @@ static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
     0x00000000, // VGT_GS_OUT_PRIM_TYPE
     0x00000000, // IA_ENHANCE
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_5[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_5[] = {
     0x00000000, // WD_ENHANCE
     0x00000000, // VGT_PRIMITIVEID_EN
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_6[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_6[] = {
     0x00000000, // VGT_PRIMITIVEID_RESET
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_7[] = {
     0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
     0x00000000, // VGT_DRAW_PAYLOAD_CNTL
     0, // HOLE
@@ -766,8 +759,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
     0x00000000, // VGT_STRMOUT_CONFIG
     0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
 };
-static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_8[] = {
     0x00000000, // PA_SC_CENTROID_PRIORITY_0
     0x00000000, // PA_SC_CENTROID_PRIORITY_1
     0x00001000, // PA_SC_LINE_CNTL
@@ -924,8 +916,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
     0x00000000, // CB_COLOR7_DCC_BASE
     0x00000000, // CB_COLOR7_DCC_BASE_EXT
 };
-static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] = {
     {gfx9_SECT_CONTEXT_def_1, 0x0000a000, 212 },
     {gfx9_SECT_CONTEXT_def_2, 0x0000a0d6, 282 },
     {gfx9_SECT_CONTEXT_def_3, 0x0000a1f5, 4 },
index 66e39cdb5cb0dc5ff1fbac239c6895b825a443ed..5fd96ddd7f0fdd233e3c1db00e5b6724c2d96ab6 100644 (file)
@@ -21,8 +21,7 @@
  *
  */
 
-static const u32 si_SECT_CONTEXT_def_1[] =
-{
+static const u32 si_SECT_CONTEXT_def_1[] = {
     0x00000000, // DB_RENDER_CONTROL
     0x00000000, // DB_COUNT_CONTROL
     0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const u32 si_SECT_CONTEXT_def_1[] =
     0x00000000, // PA_SC_VPORT_ZMIN_15
     0x3f800000, // PA_SC_VPORT_ZMAX_15
 };
-static const u32 si_SECT_CONTEXT_def_2[] =
-{
+static const u32 si_SECT_CONTEXT_def_2[] = {
     0x00000000, // CP_PERFMON_CNTX_CNTL
     0x00000000, // CP_RINGID
     0x00000000, // CP_VMID
@@ -511,8 +509,7 @@ static const u32 si_SECT_CONTEXT_def_2[] =
     0x00000000, // CB_BLEND6_CONTROL
     0x00000000, // CB_BLEND7_CONTROL
 };
-static const u32 si_SECT_CONTEXT_def_3[] =
-{
+static const u32 si_SECT_CONTEXT_def_3[] = {
     0x00000000, // PA_CL_POINT_X_RAD
     0x00000000, // PA_CL_POINT_Y_RAD
     0x00000000, // PA_CL_POINT_SIZE
@@ -520,8 +517,7 @@ static const u32 si_SECT_CONTEXT_def_3[] =
     0x00000000, // VGT_DMA_BASE_HI
     0x00000000, // VGT_DMA_BASE
 };
-static const u32 si_SECT_CONTEXT_def_4[] =
-{
+static const u32 si_SECT_CONTEXT_def_4[] = {
     0x00000000, // DB_DEPTH_CONTROL
     0x00000000, // DB_EQAA
     0x00000000, // CB_COLOR_CONTROL
@@ -680,16 +676,13 @@ static const u32 si_SECT_CONTEXT_def_4[] =
     0x00000000, // VGT_GS_OUT_PRIM_TYPE
     0x00000000, // IA_ENHANCE
 };
-static const u32 si_SECT_CONTEXT_def_5[] =
-{
+static const u32 si_SECT_CONTEXT_def_5[] = {
     0x00000000, // VGT_PRIMITIVEID_EN
 };
-static const u32 si_SECT_CONTEXT_def_6[] =
-{
+static const u32 si_SECT_CONTEXT_def_6[] = {
     0x00000000, // VGT_PRIMITIVEID_RESET
 };
-static const u32 si_SECT_CONTEXT_def_7[] =
-{
+static const u32 si_SECT_CONTEXT_def_7[] = {
     0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
     0, // HOLE
     0, // HOLE
@@ -924,8 +917,7 @@ static const u32 si_SECT_CONTEXT_def_7[] =
     0x00000000, // CB_COLOR7_CLEAR_WORD0
     0x00000000, // CB_COLOR7_CLEAR_WORD1
 };
-static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] = {
     {si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
     {si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
     {si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
index 587ee632a3b8fddb2a99cb54d0f246f0e1b1ffd0..221af054d8746994fc823ff2547255133c7d092e 100644 (file)
@@ -52,6 +52,7 @@
 
 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
 
 static const u32 crtc_offsets[] = {
        CRTC0_REGISTER_OFFSET,
@@ -364,6 +365,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
                                    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
                WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
+               dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq,
                               amdgpu_connector->hpd.hpd);
index f22ec27365bd251aa448d6b36b199becbe29598f..69e8b0db6cf7023d8ee3a763eda90be938c73ec5 100644 (file)
@@ -52,6 +52,7 @@
 
 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
 
 static const u32 crtc_offsets[] =
 {
@@ -388,6 +389,7 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
                                    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
                WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
 
+               dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
index 4dbe9b3259b50dc62bb131442010f235e587759e..60d40201fdd17730f43ce9fe3394958c5a8cac64 100644 (file)
@@ -273,6 +273,21 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
+static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
+                                int hpd)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hdp %d\n", hpd);
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+       tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+       WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
 /**
  * dce_v6_0_hpd_init - hpd setup callback.
  *
@@ -312,6 +327,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
                        continue;
                }
 
+               dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
@@ -3089,7 +3105,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
 {
-       uint32_t disp_int, mask, tmp;
+       uint32_t disp_int, mask;
        unsigned hpd;
 
        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -3102,9 +3118,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
        mask = interrupt_status_offsets[hpd].hpd;
 
        if (disp_int & mask) {
-               tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
-               tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-               WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+               dce_v6_0_hpd_int_ack(adev, hpd);
                schedule_delayed_work(&adev->hotplug_work, 0);
                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
        }
index 05bcce23385ec08e2ec8e424e865382794de405b..5a5fcc45e452102bcfa83772b4ab3d85337b5e36 100644 (file)
@@ -265,6 +265,21 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
 }
 
+static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
+                                int hpd)
+{
+       u32 tmp;
+
+       if (hpd >= adev->mode_info.num_hpd) {
+               DRM_DEBUG("invalid hdp %d\n", hpd);
+               return;
+       }
+
+       tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+       tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+       WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
 /**
  * dce_v8_0_hpd_init - hpd setup callback.
  *
@@ -304,6 +319,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
                        continue;
                }
 
+               dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
                dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
@@ -3177,7 +3193,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
 {
-       uint32_t disp_int, mask, tmp;
+       uint32_t disp_int, mask;
        unsigned hpd;
 
        if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -3190,9 +3206,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
        mask = interrupt_status_offsets[hpd].hpd;
 
        if (disp_int & mask) {
-               tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
-               tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
-               WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+               dce_v8_0_hpd_int_ack(adev, hpd);
                schedule_delayed_work(&adev->hotplug_work, 0);
                DRM_DEBUG("IH: HPD%d\n", hpd + 1);
        }
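
dce v6.0 and v8.0 gain an identical hpd_int_ack helper (v10.0 and v11.0 get
the same treatment via the forward declarations above). Calling it from
hpd_init clears any HPD interrupt latched before the driver (re)loaded, so
the first hotplug event delivered reflects a real pin change, and the IRQ
handlers now reuse the helper instead of open-coding the register ack. The
per-connector init ordering this establishes, sketched generically
(dce_vX_0 is a placeholder for any of the four generations):

	dce_vX_0_hpd_int_ack(adev, hpd);	/* drop stale latched status */
	dce_vX_0_hpd_set_polarity(adev, hpd);	/* arm for the current state */
	amdgpu_irq_get(adev, &adev->hpd_irq, hpd); /* then enable the IRQ */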
index dcdecb18b2306b84ca1b18852837409776707c69..b02d63328f1cd937063cc031f553c4c44e2d4e9c 100644 (file)
@@ -7947,7 +7947,7 @@ static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
        WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
-static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
 {
        amdgpu_gfx_off_ctrl(adev, false);
 
index 4f3bfdc75b37d66cbc5d78a5525a8a905eb1e733..2fb1342d5bd93b3659f31d7ae7207c44d3d76515 100644 (file)
@@ -727,7 +727,7 @@ static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
 
        /* init spm vmid with 0xf */
        if (adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
 
        return 0;
 }
@@ -5027,7 +5027,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
        return 0;
 }
 
-static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
 {
        u32 data;
 
@@ -5041,6 +5041,14 @@ static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
        WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
 
        amdgpu_gfx_off_ctrl(adev, true);
+
+       if (ring && amdgpu_sriov_is_pp_one_vf(adev) &&
+           (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
+            ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
+               uint32_t reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
+
+               amdgpu_ring_emit_wreg(ring, reg, data);
+       }
 }
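
update_spm_vmid() now takes the ring on whose behalf the SPM VMID changes.
On bare metal the MMIO write suffices, but under SR-IOV in pp-one-vf mode the
same RLC_SPM_MC_CNTL value is also emitted into the GFX or compute ring so
the CP applies it in submission order; that emission is why emit_frame_size
grows by 5 dwords (the size of amdgpu_ring_emit_wreg() on gfx11) in the ring
funcs below. RLC-init callers pass NULL since no ring exists yet:

	/* sketch of the two calling modes */
	adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);  /* init-time, MMIO only */
	adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, vmid); /* runtime, may also emit */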
 
 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
@@ -6104,7 +6112,8 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
        .get_rptr = gfx_v11_0_ring_get_rptr_gfx,
        .get_wptr = gfx_v11_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v11_0_ring_set_wptr_gfx,
-       .emit_frame_size = /* totally 242 maximum if 16 IBs */
+       .emit_frame_size = /* 247 dwords maximum in total, assuming 16 IBs */
+               5 + /* update_spm_vmid */
                5 + /* COND_EXEC */
                9 + /* SET_Q_PREEMPTION_MODE */
                7 + /* PIPELINE_SYNC */
@@ -6154,6 +6163,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
        .get_wptr = gfx_v11_0_ring_get_wptr_compute,
        .set_wptr = gfx_v11_0_ring_set_wptr_compute,
        .emit_frame_size =
+               5 + /* update_spm_vmid */
                20 + /* gfx_v11_0_ring_emit_gds_switch */
                7 + /* gfx_v11_0_ring_emit_hdp_flush */
                5 + /* hdp invalidate */
index 26d6286d86c9991f98c1ace2b9becb54eadeeae3..9e7ce1e6bc0613cda09a142e747344ea96877220 100644 (file)
@@ -69,7 +69,7 @@ static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev,
                amdgpu_ras_interrupt_dispatch(adev, &ih_data);
        } else {
                if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
-                       adev->virt.ops->ras_poison_handler(adev);
+                       adev->virt.ops->ras_poison_handler(adev, ras_if->block);
                else
                        dev_warn(adev->dev,
                                "No ras_poison_handler interface in SRIOV for %s!\n", ras_if->name);
index c2faf6b4c2fced463cc24598cf10c9775a9663ee..86a4865b1ae54400cb3be21c8e68d84dde618183 100644 (file)
@@ -3274,7 +3274,7 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 
        /* init spm vmid with 0xf */
        if (adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
 
        return 0;
 }
@@ -3500,7 +3500,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
 {
        u32 data;
 
index 1943beb135c4c2923c211c5727afbd16141ac718..ea174b76ee7008439ba3a6f03c5076a1bc2b3a77 100644 (file)
@@ -1288,7 +1288,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 
        /* init spm vmid with 0xf */
        if (adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
 
        return 0;
 }
@@ -5579,7 +5579,7 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
        }
 }
 
-static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
 {
        u32 data;
 
index 69c500910746018281471ad6d27350aaf2461702..169d45268ef6d50bd5b191b91cc4fa4bb086649f 100644 (file)
@@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
 
        gfx_v9_0_cp_gfx_enable(adev, true);
 
+       /* For now, limit this quirk to the gfx9 APU series; gfx10/gfx11
+        * APUs have been confirmed not to need this update.
+        */
+       if (adev->flags & AMD_IS_APU &&
+           adev->in_s3 && !adev->suspend_complete) {
+               DRM_INFO("Skipping CSB packet resubmission\n");
+               return 0;
+       }
        r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
@@ -4894,7 +4902,7 @@ static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
                WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
 }
 
-static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned int vmid)
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned int vmid)
 {
        amdgpu_gfx_off_ctrl(adev, false);
 
index bc8416afb62c5d5530ebea5b828b06f2974ed47c..f53b379d897141607f34b0ad47351cfda835c9b2 100644 (file)
@@ -970,8 +970,9 @@ static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
 }
 
-static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
-       { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = {
+       SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32
+};
 
 static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
 {
index 131cddbdda0dc11716205307e51d72aa72b271bf..aace4594a603b85450450293036c618f0dc776fc 100644 (file)
@@ -38,6 +38,7 @@
 
 #include "gfx_v9_4_3.h"
 #include "amdgpu_xcp.h"
+#include "amdgpu_aca.h"
 
 MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
 MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
@@ -48,6 +49,10 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
 #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
 
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400       /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400       /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400       /* SMN XCD XCD0 */
+
 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
 
 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
@@ -675,6 +680,66 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
        .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
 };
 
+static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
+                                              struct aca_bank *bank, enum aca_error_type type,
+                                              struct aca_bank_report *report, void *data)
+{
+       u64 status, misc0;
+       u32 instlo;
+       int ret;
+
+       status = bank->regs[ACA_REG_IDX_STATUS];
+       if ((type == ACA_ERROR_TYPE_UE &&
+            ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+           (type == ACA_ERROR_TYPE_CE &&
+            ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+               ret = aca_bank_info_decode(bank, &report->info);
+               if (ret)
+                       return ret;
+
+               /* NOTE: overwrite info.die_id with xcd id for gfx */
+               instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+               instlo &= GENMASK(31, 1);
+               report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
+
+               misc0 = bank->regs[ACA_REG_IDX_MISC0];
+               report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+       }
+
+       return 0;
+}
+
+static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+                                        enum aca_error_type type, void *data)
+{
+       u32 instlo;
+
+       instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+       instlo &= GENMASK(31, 1);
+       switch (instlo) {
+       case mmSMNAID_XCD0_MCA_SMU:
+       case mmSMNAID_XCD1_MCA_SMU:
+       case mmSMNXCD_XCD0_MCA_SMU:
+               return true;
+       default:
+               break;
+       }
+
+       return false;
+}
+
+static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
+       .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
+       .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
+};
+
+static const struct aca_info gfx_v9_4_3_aca_info = {
+       .hwip = ACA_HWIP_TYPE_SMU,
+       .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+       .bank_ops = &gfx_v9_4_3_aca_bank_ops,
+};
+
 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
 {
        u32 gb_addr_config;
@@ -1109,7 +1174,7 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
 {
        /* init spm vmid with 0xf */
        if (adev->gfx.rlc.funcs->update_spm_vmid)
-               adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+               adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
 
        return 0;
 }
@@ -1320,7 +1385,7 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
        return 0;
 }
 
-static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
+static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                                       unsigned vmid)
 {
        u32 reg, data;
@@ -4242,9 +4307,32 @@ struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
        .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
 };
 
+static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int r;
+
+       r = amdgpu_ras_block_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
+                               &gfx_v9_4_3_aca_info,
+                               NULL);
+       if (r)
+               goto late_fini;
+
+       return 0;
+
+late_fini:
+       amdgpu_ras_block_late_fini(adev, ras_block);
+
+       return r;
+}
+
 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
        .ras_block = {
                .hw_ops = &gfx_v9_4_3_ras_ops,
+               .ras_late_init = &gfx_v9_4_3_ras_late_init,
        },
        .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
 };
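
This is one of the first users of the new RAS ACA framework from this series:
an IP block supplies a bank filter and a report generator via aca_bank_ops,
describes itself with an aca_info, and binds the pair during ras_late_init,
unwinding the generic late init on failure. The wiring pattern a block
follows, sketched with placeholder myip_* names (the framework calls are the
ones used above):

	static const struct aca_bank_ops myip_aca_bank_ops = {
		.aca_bank_generate_report = myip_bank_generate_report,
		.aca_bank_is_valid = myip_bank_is_valid,
	};

	static const struct aca_info myip_aca_info = {
		.hwip = ACA_HWIP_TYPE_SMU,
		.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
		.bank_ops = &myip_aca_bank_ops,
	};

	static int myip_ras_late_init(struct amdgpu_device *adev,
				      struct ras_common_if *ras_block)
	{
		int r = amdgpu_ras_block_late_init(adev, ras_block);

		if (r)
			return r;

		r = amdgpu_ras_bind_aca(adev, ras_block->block,
					&myip_aca_info, NULL);
		if (r)
			amdgpu_ras_block_late_fini(adev, ras_block);

		return r;
	}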
index 6c51856088546faed3c2e3d9376f8c23d54ba554..db89d13bd80db6dbe769b2d59fc736a4b51f63fb 100644 (file)
@@ -262,16 +262,17 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        /* flush hdp cache */
        adev->hdp.funcs->flush_hdp(adev, NULL);
 
-       /* For SRIOV run time, driver shouldn't access the register through MMIO
-        * Directly use kiq to do the vm invalidation instead
+       /* This is necessary for SRIOV as well as for GFXOFF to function
+        * properly under bare metal
         */
        if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
-               amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                               1 << vmid, GET_INST(GC, 0));
+               amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+                                                1 << vmid, GET_INST(GC, 0));
                return;
        }
 
+       /* This path is needed before KIQ/MES/GFXOFF are set up */
        hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
 
        spin_lock(&adev->gmc.invalidate_lock);
index c9c653cfc765b8b88e5ab1f77cefcbbce38ff79c..6c68135cac9f021f522089345043474882b00d1c 100644 (file)
@@ -223,16 +223,17 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        /* flush hdp cache */
        adev->hdp.funcs->flush_hdp(adev, NULL);
 
-       /* For SRIOV run time, driver shouldn't access the register through MMIO
-        * Directly use kiq to do the vm invalidation instead
+       /* This is necessary for SRIOV as well as for GFXOFF to function
+        * properly under bare metal
         */
        if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
-               amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                               1 << vmid, GET_INST(GC, 0));
+               amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+                                                1 << vmid, GET_INST(GC, 0));
                return;
        }
 
+       /* This path is needed before KIQ/MES/GFXOFF are set up */
        hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ? GC_HWIP : MMHUB_HWIP;
 
        spin_lock(&adev->gmc.invalidate_lock);
index 59d9215e555629577b43afcba38e945f5ce90bcd..23b478639921a39ecd67fbcdc183fde331585ea5 100644 (file)
@@ -435,9 +435,10 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
        WREG32(mmVM_PRT_CNTL, tmp);
 
        if (enable) {
-               uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+               uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+                       AMDGPU_GPU_PAGE_SHIFT;
                uint32_t high = adev->vm_manager.max_pfn -
-                       (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+                       (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
 
                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
index 45a2f8e031a2c9920f3a68ae690731357f33da0c..3da7b6a2b00d29113bacf770c9918efa9f2408ee 100644 (file)
@@ -563,9 +563,10 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
        WREG32(mmVM_PRT_CNTL, tmp);
 
        if (enable) {
-               uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+               uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+                       AMDGPU_GPU_PAGE_SHIFT;
                uint32_t high = adev->vm_manager.max_pfn -
-                       (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+                       (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
 
                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
index 4422b27a3cc2fc069a6ecb3e6d8b9630e9c173cc..969a9e8671703f51b8b79b4ea486822822e604fd 100644 (file)
@@ -777,9 +777,10 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
        WREG32(mmVM_PRT_CNTL, tmp);
 
        if (enable) {
-               uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
+               uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
+                       AMDGPU_GPU_PAGE_SHIFT;
                uint32_t high = adev->vm_manager.max_pfn -
-                       (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
+                       (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);
 
                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
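
The single AMDGPU_VA_RESERVED_SIZE becomes independent bottom and top
reservations across all three legacy GMC generations, so each PRT aperture
bound now excludes only its own end of the address space (the top slice
leaves room for new users such as the seq64 mapping added elsewhere in this
series without widening the bottom hole). A worked example of the aperture
math, using illustrative sizes:

	/* assuming 4 KiB GPU pages (AMDGPU_GPU_PAGE_SHIFT == 12) and an
	 * illustrative 1 MiB reservation at each end of the VA space:
	 *   low  = 0x100000 >> 12             = 0x100 (first usable PFN)
	 *   high = max_pfn - (0x100000 >> 12) = max_pfn - 0x100
	 * the VM_PRT_APERTURE registers are then programmed to span
	 * [low, high].
	 */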
index 40a00ea0009f6aca367c68a43e11ae608f71d416..d442ae85162db5ff8c269969efa4b9c963d33382 100644 (file)
@@ -829,23 +829,25 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
        ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
-       /* This is necessary for a HW workaround under SRIOV as well
-        * as GFXOFF under bare metal
-        */
        if (vmhub >= AMDGPU_MMHUB0(0))
                inst = GET_INST(GC, 0);
        else
                inst = vmhub;
+
+       /* This is necessary for SRIOV as well as for GFXOFF to function
+        * properly under bare metal
+        */
        if (adev->gfx.kiq[inst].ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
-               amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                                                  1 << vmid, inst);
+               amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
+                                                1 << vmid, inst);
                return;
        }
 
+       /* This path is needed before KIQ/MES/GFXOFF are set up */
        spin_lock(&adev->gmc.invalidate_lock);
 
        /*
@@ -1947,14 +1949,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
 
 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
 {
-       static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
-       u32 vram_info;
-
-       /* Only for dGPU, vendor informaton is reliable */
-       if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) {
-               vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
-               adev->gmc.vram_vendor = vram_info & 0xF;
-       }
        adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
        adev->gmc.vram_width = 128 * 64;
 }
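
The v10.0, v11.0, and v9.0 TLB-flush paths above switch from
amdgpu_virt_kiq_reg_write_reg_wait() to the new
amdgpu_gmc_fw_reg_write_reg_wait(), with the comments corrected to match:
routing the request write plus ack poll through firmware (KIQ, or MES where
available) is not only an SR-IOV workaround but also what keeps the sequence
safe on bare metal while GFXOFF may have the GC register path powered down.
Roughly, the helper stands in for a direct sequence like the following, which
is only safe with the register path up:

	/* simplified equivalent; the real paths bound the wait */
	WREG32_NO_KIQ(req, inv_req);
	while (!(RREG32_NO_KIQ(ack) & (1 << vmid)))
		udelay(1);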
index e67a337457edad3b12bc1c822f46c00e8fd32c9d..99cd49ee8ef6e48c3488888622af9256a0bd44dc 100644 (file)
@@ -551,7 +551,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;
 
-       if(state == adev->jpeg.cur_state)
+       if (state == adev->jpeg.cur_state)
                return 0;
 
        if (state == AMD_PG_STATE_GATE)
@@ -559,7 +559,7 @@ static int jpeg_v2_5_set_powergating_state(void *handle,
        else
                ret = jpeg_v2_5_start(adev);
 
-       if(!ret)
+       if (!ret)
                adev->jpeg.cur_state = state;
 
        return ret;
@@ -754,8 +754,7 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_5_ip_block = {
                .type = AMD_IP_BLOCK_TYPE_JPEG,
                .major = 2,
                .minor = 5,
@@ -763,8 +762,7 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
                .funcs = &jpeg_v2_5_ip_funcs,
 };
 
-const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
-{
+const struct amdgpu_ip_block_version jpeg_v2_6_ip_block = {
                .type = AMD_IP_BLOCK_TYPE_JPEG,
                .major = 2,
                .minor = 6,
index bc38b90f8cf88e8fee393e8e52214ac72f0aa8a6..88ea58d5c4abf5b0f20abff28f9833f402e4b016 100644 (file)
@@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
        return ret;
 }
 
-static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
-                                       struct amdgpu_irq_src *source,
-                                       unsigned type,
-                                       enum amdgpu_interrupt_state state)
-{
-       return 0;
-}
-
 static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned int type,
@@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
-       .set = jpeg_v4_0_set_interrupt_state,
        .process = jpeg_v4_0_process_interrupt,
 };
 
index 6ede85b28cc8c0bbfd6a7e94c6a3d1a677e958bf..78b74daf4eebfc30f04ee4aaf6d0ff92891ff30f 100644 (file)
@@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
                        RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
                        jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
        }
-       amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
 
        return 0;
 }
@@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
        return ret;
 }
 
-static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
-                                       struct amdgpu_irq_src *source,
-                                       unsigned type,
-                                       enum amdgpu_interrupt_state state)
-{
-       return 0;
-}
-
 static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
@@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
-       .set = jpeg_v4_0_5_set_interrupt_state,
        .process = jpeg_v4_0_5_process_interrupt,
 };
 
index fb53aacdcba20f01019a20d63c7bb07d60e1e8d1..c0fc44cdd6581c1d8722007b2ec474aa79c39030 100644 (file)
@@ -33,6 +33,7 @@
 
 #define regVM_L2_CNTL3_DEFAULT 0x80100007
 #define regVM_L2_CNTL4_DEFAULT 0x000000c1
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400
 
 static u64 mmhub_v1_8_get_fb_location(struct amdgpu_device *adev)
 {
@@ -705,8 +706,94 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = {
        .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count,
 };
 
+static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle,
+                                              struct aca_bank *bank, enum aca_error_type type,
+                                              struct aca_bank_report *report, void *data)
+{
+       u64 status, misc0;
+       int ret;
+
+       status = bank->regs[ACA_REG_IDX_STATUS];
+       if ((type == ACA_ERROR_TYPE_UE &&
+            ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+           (type == ACA_ERROR_TYPE_CE &&
+            ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+               ret = aca_bank_info_decode(bank, &report->info);
+               if (ret)
+                       return ret;
+
+               misc0 = bank->regs[ACA_REG_IDX_MISC0];
+               report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+       }
+
+       return 0;
+}
+
+/* see the SMU driver i/f header file for these error codes */
+static int mmhub_v1_8_err_codes[] = {
+       0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
+       5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
+       10, /* CODE_UTCL2_ROUTER */
+       11, /* CODE_VML2 */
+       12, /* CODE_VML2_WALKER */
+       13, /* CODE_MMCANE */
+};
+
+static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+                                        enum aca_error_type type, void *data)
+{
+       u32 instlo;
+
+       instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+       instlo &= GENMASK(31, 1);
+
+       if (instlo != mmSMNAID_AID0_MCA_SMU)
+               return false;
+
+       if (aca_bank_check_error_codes(handle->adev, bank,
+                                      mmhub_v1_8_err_codes,
+                                      ARRAY_SIZE(mmhub_v1_8_err_codes)))
+               return false;
+
+       return true;
+}
+
+static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = {
+       .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report,
+       .aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid,
+};
+
+static const struct aca_info mmhub_v1_8_aca_info = {
+       .hwip = ACA_HWIP_TYPE_SMU,
+       .mask = ACA_ERROR_UE_MASK,
+       .bank_ops = &mmhub_v1_8_aca_bank_ops,
+};
+
+static int mmhub_v1_8_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int r;
+
+       r = amdgpu_ras_block_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__MMHUB,
+                               &mmhub_v1_8_aca_info, NULL);
+       if (r)
+               goto late_fini;
+
+       return 0;
+
+late_fini:
+       amdgpu_ras_block_late_fini(adev, ras_block);
+
+       return r;
+}
+
 struct amdgpu_mmhub_ras mmhub_v1_8_ras = {
        .ras_block = {
                .hw_ops = &mmhub_v1_8_ras_hw_ops,
+               .ras_late_init = mmhub_v1_8_ras_late_init,
        },
 };
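
MMHUB's bank filter is stricter than the GFX one: after matching the AID
instance, it also requires the bank's SMU error code to appear in the
MMHUB-owned list, since other IP blocks (SDMA, for one, with codes 33-36
below) report through the same AID MCA bank. The assumed contract of the
check, restated:

	/* assumed semantics, consistent with the usage above:
	 * aca_bank_check_error_codes() returns 0 when the bank's error code
	 * is in the supplied list and nonzero otherwise, so a nonzero result
	 * means the bank belongs to another IP and is rejected.
	 */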
index 63725b2ebc03733f607aaf9dd9f8a649f75d2dae..a2bd2c3b1ef9c4a4dfac7ab131a6588429d00856 100644 (file)
@@ -404,7 +404,8 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
        return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
 }
 
-static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
+static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev,
+                                       enum amdgpu_ras_block block)
 {
        xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
 }
index 6a68ee946f1cc3f58862a8c67fd3c53f4fdb092b..77f5b55decf96032ce1794693a3d51f1481f6009 100644 (file)
@@ -152,14 +152,14 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
        xgpu_nv_mailbox_set_valid(adev, false);
 }
 
-static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
-                                       enum idh_request req)
+static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
+                       enum idh_request req, u32 data1, u32 data2, u32 data3)
 {
        int r, retry = 1;
        enum idh_event event = -1;
 
 send_request:
-       xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
+       xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
 
        switch (req) {
        case IDH_REQ_GPU_INIT_ACCESS:
@@ -170,6 +170,10 @@ send_request:
        case IDH_REQ_GPU_INIT_DATA:
                event = IDH_REQ_GPU_INIT_DATA_READY;
                break;
+       case IDH_RAS_POISON:
+               if (data1 != 0)
+                       event = IDH_RAS_POISON_READY;
+               break;
        default:
                break;
        }
@@ -206,6 +210,13 @@ send_request:
        return 0;
 }
 
+static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
+                                       enum idh_request req)
+{
+       return xgpu_nv_send_access_requests_with_param(adev,
+                                               req, 0, 0, 0);
+}
+
 static int xgpu_nv_request_reset(struct amdgpu_device *adev)
 {
        int ret, i = 0;
@@ -424,9 +435,17 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 }
 
-static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
+static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
+               enum amdgpu_ras_block block)
 {
-       xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+       if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+               xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+       } else {
+               amdgpu_virt_fini_data_exchange(adev);
+               xgpu_nv_send_access_requests_with_param(adev,
+                                       IDH_RAS_POISON, block, 0, 0);
+               amdgpu_virt_init_data_exchange(adev);
+       }
 }
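
For UMC IP v12.0.0 and newer the poison notification carries the affected RAS
block and, per the request path above, becomes synchronous whenever data1 is
nonzero: the mailbox code polls for IDH_RAS_POISON_READY before returning
(block id 0 still goes out fire-and-forget). Host data exchange is torn down
around the call, presumably because it shares the mailbox transport, and then
restarted. The flow, annotated:

	amdgpu_virt_fini_data_exchange(adev);	/* quiesce the shared mailbox */
	xgpu_nv_send_access_requests_with_param(adev, IDH_RAS_POISON,
						block, 0, 0); /* waits for READY */
	amdgpu_virt_init_data_exchange(adev);	/* resume host data exchange */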
 
 const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
index d0221ce087690e9ffaa8c24a0ccfee8226c0b80a..1e8fd90cab434724a04aa2ce45f8b44b0ce37865 100644 (file)
@@ -51,6 +51,7 @@ enum idh_event {
        IDH_FAIL,
        IDH_QUERY_ALIVE,
        IDH_REQ_GPU_INIT_DATA_READY,
+       IDH_RAS_POISON_READY,
 
        IDH_TEXT_MESSAGE = 255,
 };
index de93614726c9a48ccd398c6ac5570a8844fb7618..4178f4e5dad7329610958834fb22b8af9d821139 100644 (file)
@@ -728,8 +728,7 @@ static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
                adev->irq.ih_funcs = &navi10_ih_funcs;
 }
 
-const struct amdgpu_ip_block_version navi10_ih_ip_block =
-{
+const struct amdgpu_ip_block_version navi10_ih_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_IH,
        .major = 5,
        .minor = 0,
index e90f33780803458c32843f2599c07e4f598ca659..b4723d68eab0f939ba057b67cf7712ddb512c8c8 100644 (file)
@@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
        u32 inst_mask;
        int i;
 
+       if (amdgpu_sriov_vf(adev))
+               adev->rmmio_remap.reg_offset =
+                       SOC15_REG_OFFSET(
+                               NBIO, 0,
+                               regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+                       << 2;
        WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
                0xff & ~(adev->gfx.xcc_mask));
 
index df1844d0800f2e5d9bcbc9d546ef3e52e060b733..722b6066ce07c5330fa246ec81e56cd41893e064 100644 (file)
@@ -27,6 +27,7 @@
 #include "amdgpu_ucode.h"
 #include "soc15_common.h"
 #include "psp_v13_0.h"
+#include "amdgpu_ras.h"
 
 #include "mp/mp_13_0_2_offset.h"
 #include "mp/mp_13_0_2_sh_mask.h"
@@ -187,11 +188,18 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
 static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
+       int ret;
 
        if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
-               psp_v13_0_wait_for_vmbx_ready(psp);
+               ret = psp_v13_0_wait_for_vmbx_ready(psp);
+               if (ret)
+                       amdgpu_ras_query_boot_status(adev, 4);
+
+               ret = psp_v13_0_wait_for_bootloader(psp);
+               if (ret)
+                       amdgpu_ras_query_boot_status(adev, 4);
 
-               return psp_v13_0_wait_for_bootloader(psp);
+               return ret;
        }
 
        return 0;
@@ -763,81 +771,28 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
        return 0;
 }
 
-
-static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
-                                          uint32_t inst,
-                                          uint32_t boot_error)
-{
-       uint32_t socket_id;
-       uint32_t aid_id;
-       uint32_t hbm_id;
-       uint32_t reg_data;
-
-       socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
-       aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
-       hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
-
-       reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
-       dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
-                socket_id, aid_id, reg_data);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
-               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
-                        socket_id, aid_id, hbm_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
-               dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
-                        socket_id, aid_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
-               dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
-                        socket_id, aid_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
-               dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
-                        socket_id, aid_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
-               dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
-                        socket_id, aid_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
-               dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
-                        socket_id, aid_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
-               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
-                        socket_id, aid_id, hbm_id);
-
-       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
-               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
-                        socket_id, aid_id, hbm_id);
-}
-
-static int psp_v13_0_query_boot_status(struct psp_context *psp)
+static bool psp_v13_0_get_ras_capability(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
-       int inst_mask = adev->aid_mask;
-       uint32_t reg_data;
-       uint32_t i;
-       int ret = 0;
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       u32 reg_data;
 
-       if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
-               return 0;
+       /* querying the RAS capability is the host's job under SRIOV */
+       if (amdgpu_sriov_vf(adev))
+               return false;
 
-       if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
-               return 0;
+       if (!con)
+               return false;
 
-       for_each_inst(i, inst_mask) {
-               reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
-               if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
-                       psp_v13_0_boot_error_reporting(adev, i, reg_data);
-                       ret = -EINVAL;
-                       break;
-               }
+       if ((amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) &&
+           (!(adev->flags & AMD_IS_APU))) {
+               reg_data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);
+               adev->ras_hw_enabled = (reg_data & GENMASK_ULL(23, 0));
+               con->poison_supported = !!(reg_data & BIT(24));
+               return true;
+       } else {
+               return false;
        }
-
-       return ret;
 }
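
Instead of polling per-AID boot status, the driver now asks the PSP once
whether RAS is supported: per the code above, C2PMSG_127 packs a bitmask of
RAS-capable blocks in bits 23:0 and a poison-support flag in bit 24, seeding
adev->ras_hw_enabled for the RAS core. The decode, restated compactly
(layout inferred from the code above):

	u32 reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_127);

	adev->ras_hw_enabled  = reg & GENMASK(23, 0);	/* RAS-capable blocks */
	con->poison_supported = !!(reg & BIT(24));	/* poison consumption */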
 
 static const struct psp_funcs psp_v13_0_funcs = {
@@ -862,7 +817,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
        .update_spirom = psp_v13_0_update_spirom,
        .vbflash_stat = psp_v13_0_vbflash_status,
        .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
-       .query_boot_status = psp_v13_0_query_boot_status,
+       .get_ras_capability = psp_v13_0_get_ras_capability,
 };
 
 void psp_v13_0_set_psp_funcs(struct psp_context *psp)
index 8d5d86675a7fea5e4e5e8bc4e49cdbb580ae17a6..07e19caf2bc10dd30646e5bfd84acd7c722c820c 100644 (file)
@@ -57,22 +57,19 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev);
 MODULE_FIRMWARE("amdgpu/topaz_sdma.bin");
 MODULE_FIRMWARE("amdgpu/topaz_sdma1.bin");
 
-static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
-{
+static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = {
        SDMA0_REGISTER_OFFSET,
        SDMA1_REGISTER_OFFSET
 };
 
-static const u32 golden_settings_iceland_a11[] =
-{
+static const u32 golden_settings_iceland_a11[] = {
        mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
        mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
        mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
 };
 
-static const u32 iceland_mgcg_cgcg_init[] =
-{
+static const u32 iceland_mgcg_cgcg_init[] = {
        mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
        mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
 };
@@ -142,7 +139,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
        case CHIP_TOPAZ:
                chip_name = "topaz";
                break;
-       default: BUG();
+       default:
+               BUG();
        }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1258,8 +1256,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
        adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
-const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
-{
+const struct amdgpu_ip_block_version sdma_v2_4_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 2,
        .minor = 4,
index 2d688dca26bedba5018bd41c76fb09a65a38cd66..fec5a3d1c4bc2035e963e47672df18ee6880462e 100644 (file)
@@ -45,6 +45,8 @@
 
 MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
 
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400
+
 #define WREG32_SDMA(instance, offset, value) \
        WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
 #define RREG32_SDMA(instance, offset) \
@@ -2204,9 +2206,79 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
        .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
 };
 
+static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle,
+                                               struct aca_bank *bank, enum aca_error_type type,
+                                               struct aca_bank_report *report, void *data)
+{
+       u64 status, misc0;
+       int ret;
+
+       status = bank->regs[ACA_REG_IDX_STATUS];
+       if ((type == ACA_ERROR_TYPE_UE &&
+            ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
+           (type == ACA_ERROR_TYPE_CE &&
+            ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {
+
+               ret = aca_bank_info_decode(bank, &report->info);
+               if (ret)
+                       return ret;
+
+               misc0 = bank->regs[ACA_REG_IDX_MISC0];
+               report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
+       }
+
+       return 0;
+}
+
+/* CODE_SDMA0 - CODE_SDMA4, see the SMU driver interface header file */
+static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
+
+static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
+                                         enum aca_error_type type, void *data)
+{
+       u32 instlo;
+
+       instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
+       instlo &= GENMASK(31, 1);
+
+       if (instlo != mmSMNAID_AID0_MCA_SMU)
+               return false;
+
+       if (aca_bank_check_error_codes(handle->adev, bank,
+                                      sdma_v4_4_2_err_codes,
+                                      ARRAY_SIZE(sdma_v4_4_2_err_codes)))
+               return false;
+
+       return true;
+}
+
+static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
+       .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report,
+       .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
+};
+
+static const struct aca_info sdma_v4_4_2_aca_info = {
+       .hwip = ACA_HWIP_TYPE_SMU,
+       .mask = ACA_ERROR_UE_MASK,
+       .bank_ops = &sdma_v4_4_2_aca_bank_ops,
+};
+
+static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int r;
+
+       r = amdgpu_sdma_ras_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__SDMA,
+                                  &sdma_v4_4_2_aca_info, NULL);
+}
+
 static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
        .ras_block = {
                .hw_ops = &sdma_v4_4_2_ras_hw_ops,
+               .ras_late_init = sdma_v4_4_2_ras_late_init,
        },
 };
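
The two callbacks registered above split the work: ->aca_bank_is_valid() filters banks by SMN instance ID and SMU error code, and only then does ->aca_bank_generate_report() decode the bank and pull the error count out of MISC0. A rough sketch of the assumed call order follows; the helper is hypothetical (the real dispatch lives in the ACA core), shown only to make the flow explicit.

/* Hypothetical consumer-side sketch; not part of this series. */
static int sdma_count_bank_errors(struct aca_handle *handle, struct aca_bank *bank,
				  enum aca_error_type type, struct aca_bank_report *report)
{
	/* reject banks owned by other IPs or carrying non-SDMA error codes */
	if (!sdma_v4_4_2_aca_bank_is_valid(handle, bank, type, NULL))
		return 0;

	/* decode bank info and read the error count from MISC0 */
	return sdma_v4_4_2_aca_bank_generate_report(handle, bank, type, report, NULL);
}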
 
index 15033efec2bac0148e5d9381027a6ee3e70334b7..c64c01e2944a2e4c1f4177355771a1b47cfcc666 100644 (file)
@@ -1298,10 +1298,32 @@ static int soc15_common_suspend(void *handle)
        return soc15_common_hw_fini(adev);
 }
 
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+       u32 sol_reg;
+
+       sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+       /* Will reset in the following suspend abort cases:
+        * 1) For now the reset is only done on the APU side; the dGPU
+        *    path has not been validated yet.
+        * 2) The S3 suspend was aborted and the TOS is already launched.
+        */
+       if ((adev->flags & AMD_IS_APU) && adev->in_s3 &&
+           !adev->suspend_complete &&
+           sol_reg)
+               return true;
+
+       return false;
+}
+
 static int soc15_common_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (soc15_need_reset_on_resume(adev)) {
+               dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+               soc15_asic_reset(adev);
+       }
        return soc15_common_hw_init(adev);
 }
 
index 879bb7af297c7bda447b199a2165a03b4c691562..056d4df8fa1ff4a2aa3a2e7ac9733c29b4897ad4 100644 (file)
@@ -36,6 +36,9 @@ enum ras_command {
        TA_RAS_COMMAND__ENABLE_FEATURES = 0,
        TA_RAS_COMMAND__DISABLE_FEATURES,
        TA_RAS_COMMAND__TRIGGER_ERROR,
+       TA_RAS_COMMAND__QUERY_BLOCK_INFO,
+       TA_RAS_COMMAND__QUERY_SUB_BLOCK_INFO,
+       TA_RAS_COMMAND__QUERY_ADDRESS,
 };
 
 enum ta_ras_status {
@@ -105,6 +108,11 @@ enum ta_ras_error_type {
        TA_RAS_ERROR__POISON                    = 8,
 };
 
+enum ta_ras_address_type {
+       TA_RAS_MCA_TO_PA,
+       TA_RAS_PA_TO_MCA,
+};
+
 /* Input/output structures for RAS commands */
 /**********************************************************/
 
@@ -133,12 +141,38 @@ struct ta_ras_init_flags {
        uint8_t channel_dis_num;
 };
 
+struct ta_ras_mca_addr {
+       uint64_t err_addr;
+       uint32_t ch_inst;
+       uint32_t umc_inst;
+       uint32_t node_inst;
+};
+
+struct ta_ras_phy_addr {
+       uint64_t pa;
+       uint32_t bank;
+       uint32_t channel_idx;
+};
+
+struct ta_ras_query_address_input {
+       enum ta_ras_address_type addr_type;
+       struct ta_ras_mca_addr ma;
+       struct ta_ras_phy_addr pa;
+};
+
 struct ta_ras_output_flags {
        uint8_t ras_init_success_flag;
        uint8_t err_inject_switch_disable_flag;
        uint8_t reg_access_failure_flag;
 };
 
+struct ta_ras_query_address_output {
+       /* output flags are not used for address queries */
+       struct ta_ras_output_flags flags;
+       struct ta_ras_mca_addr ma;
+       struct ta_ras_phy_addr pa;
+};
+
 /* Common input structure for RAS callbacks */
 /**********************************************************/
 union ta_ras_cmd_input {
@@ -146,12 +180,14 @@ union ta_ras_cmd_input {
        struct ta_ras_enable_features_input     enable_features;
        struct ta_ras_disable_features_input    disable_features;
        struct ta_ras_trigger_error_input       trigger_error;
+       struct ta_ras_query_address_input       address;
 
        uint32_t reserve_pad[256];
 };
 
 union ta_ras_cmd_output {
        struct ta_ras_output_flags flags;
+       struct ta_ras_query_address_output address;
 
        uint32_t reserve_pad[256];
 };
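
As a usage sketch of the new address-query structures: a caller fills ta_ras_query_address_input with TA_RAS_MCA_TO_PA plus the MCA address tuple and reads the physical address back from the output. The helper below is illustrative only; psp_ras_query_address() is the entry point actually used by the UMC v12.0 code later in this merge, returning nonzero on failure.

/* Illustrative MCA -> physical-address query (helper name is made up). */
static int example_mca_to_pa(struct psp_context *psp, uint64_t err_addr,
			     uint32_t ch_inst, uint32_t umc_inst,
			     uint32_t node_inst, uint64_t *soc_pa)
{
	struct ta_ras_query_address_input addr_in = {
		.addr_type = TA_RAS_MCA_TO_PA,
		.ma = {
			.err_addr = err_addr,
			.ch_inst = ch_inst,
			.umc_inst = umc_inst,
			.node_inst = node_inst,
		},
	};
	struct ta_ras_query_address_output addr_out = {};
	int ret;

	ret = psp_ras_query_address(psp, &addr_in, &addr_out);
	if (ret)
		return ret;	/* caller may fall back to SW translation */

	*soc_pa = addr_out.pa.pa;
	return 0;
}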
index 7458a218e89db1dc98211c83c864e29cd81ab7af..14ef7a24be7b567424c0cfecfdc2bf822923abc6 100644 (file)
@@ -89,12 +89,28 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
                umc_v12_0_reset_error_count_per_channel, NULL);
 }
 
+bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
+{
+       dev_info(adev->dev,
+               "MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
+               mc_umc_status,
+               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
+               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
+               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
+               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
+               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
+               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
+       );
+
+       return (amdgpu_ras_is_poison_mode_supported(adev) &&
+               (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+               (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
+}
+
 bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
 {
-       if (amdgpu_ras_is_poison_mode_supported(adev) &&
-           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
-           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
-               return true;
+       if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
+               return false;
 
        return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
                (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
@@ -104,9 +120,7 @@ bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_um
 
 bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
 {
-       if (amdgpu_ras_is_poison_mode_supported(adev) &&
-           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
-           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1))
+       if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
                return false;
 
        return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -119,9 +133,10 @@ bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_
                !(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
 }
 
-static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
+static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
                                                   uint64_t umc_reg_offset,
-                                                  unsigned long *error_count)
+                                                  unsigned long *error_count,
+                                                  check_error_type_func error_type_func)
 {
        uint64_t mc_umc_status;
        uint64_t mc_umc_status_addr;
@@ -129,31 +144,11 @@ static void umc_v12_0_query_correctable_error_count(struct amdgpu_device *adev,
        mc_umc_status_addr =
                SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
 
-       /* Rely on MCUMC_STATUS for correctable error counter
-        * MCUMC_STATUS is a 64 bit register
-        */
+       /* Check MCUMC_STATUS */
        mc_umc_status =
                RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
 
-       if (umc_v12_0_is_correctable_error(adev, mc_umc_status))
-               *error_count += 1;
-}
-
-static void umc_v12_0_query_uncorrectable_error_count(struct amdgpu_device *adev,
-                                                     uint64_t umc_reg_offset,
-                                                     unsigned long *error_count)
-{
-       uint64_t mc_umc_status;
-       uint64_t mc_umc_status_addr;
-
-       mc_umc_status_addr =
-               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
-
-       /* Check the MCUMC_STATUS. */
-       mc_umc_status =
-               RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);
-
-       if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status))
+       if (error_type_func(adev, mc_umc_status))
                *error_count += 1;
 }
 
@@ -162,7 +157,7 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
                                        uint32_t ch_inst, void *data)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)data;
-       unsigned long ue_count = 0, ce_count = 0;
+       unsigned long ue_count = 0, ce_count = 0, de_count = 0;
 
        /* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
         * which can be used as die ID directly */
@@ -174,11 +169,16 @@ static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
        uint64_t umc_reg_offset =
                get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);
 
-       umc_v12_0_query_correctable_error_count(adev, umc_reg_offset, &ce_count);
-       umc_v12_0_query_uncorrectable_error_count(adev, umc_reg_offset, &ue_count);
+       umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+                                           &ce_count, umc_v12_0_is_correctable_error);
+       umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+                                           &ue_count, umc_v12_0_is_uncorrectable_error);
+       umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
+                                           &de_count, umc_v12_0_is_deferred_error);
 
        amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
        amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
+       amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);
 
        return 0;
 }
@@ -203,14 +203,14 @@ static bool umc_v12_0_bit_wise_xor(uint32_t val)
        return result;
 }
 
-static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
-                                           struct ras_err_data *err_data, uint64_t err_addr,
-                                           uint32_t ch_inst, uint32_t umc_inst,
-                                           uint32_t node_inst)
+static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
+                                       uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
+                                       uint32_t node_inst,
+                                       struct ta_ras_query_address_output *addr_out)
 {
        uint32_t channel_index, i;
-       uint64_t soc_pa, na, retired_page, column;
-       uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row, row_xor;
+       uint64_t na, soc_pa;
+       uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
        uint32_t bank0, bank1, bank2, bank3, bank;
 
        bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
@@ -260,12 +260,44 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
        /* the umc channel bits are not original values, they are hashed */
        UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
 
+       addr_out->pa.pa = soc_pa;
+       addr_out->pa.bank = bank;
+       addr_out->pa.channel_idx = channel_index;
+}
+
+static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
+                                           struct ras_err_data *err_data, uint64_t err_addr,
+                                           uint32_t ch_inst, uint32_t umc_inst,
+                                           uint32_t node_inst)
+{
+       uint32_t col, row, row_xor, bank, channel_index;
+       uint64_t soc_pa, retired_page, column;
+       struct ta_ras_query_address_input addr_in;
+       struct ta_ras_query_address_output addr_out;
+
+       addr_in.addr_type = TA_RAS_MCA_TO_PA;
+       addr_in.ma.err_addr = err_addr;
+       addr_in.ma.ch_inst = ch_inst;
+       addr_in.ma.umc_inst = umc_inst;
+       addr_in.ma.node_inst = node_inst;
+
+       if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
+               /* fall back to the old path if the PA can't be obtained from the PSP */
+               umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst,
+                               node_inst, &addr_out);
+
+       soc_pa = addr_out.pa.pa;
+       bank = addr_out.pa.bank;
+       channel_index = addr_out.pa.channel_idx;
+
+       col = (err_addr >> 1) & 0x1fULL;
+       row = (err_addr >> 10) & 0x3fffULL;
+       row_xor = row ^ (0x1ULL << 13);
        /* clear [C3 C2] in soc physical address */
        soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
        /* clear [C4] in soc physical address */
        soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);
 
-       row_xor = row ^ (0x1ULL << 13);
        /* loop for all possibilities of [C4 C3 C2] */
        for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
                retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
@@ -316,10 +348,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
        }
 
        /* calculate error address if ue error is detected */
-       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
-           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1) {
-
+       if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
                mc_umc_addrt0 =
                        SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
 
@@ -385,45 +414,69 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade
 {
        struct ras_err_node *err_node;
        uint64_t mc_umc_status;
+       struct ras_err_info *err_info;
+       struct ras_err_addr *mca_err_addr, *tmp;
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 
        for_each_ras_error(err_node, err_data) {
-               mc_umc_status = err_node->err_info.err_addr.err_status;
-               if (!mc_umc_status)
+               err_info = &err_node->err_info;
+               if (list_empty(&err_info->err_addr_list))
                        continue;
 
-               if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)) {
-                       uint64_t mca_addr, err_addr, mca_ipid;
-                       uint32_t InstanceIdLo;
-                       struct amdgpu_smuio_mcm_config_info *mcm_info;
-
-                       mcm_info = &err_node->err_info.mcm_info;
-                       mca_addr = err_node->err_info.err_addr.err_addr;
-                       mca_ipid = err_node->err_info.err_addr.err_ipid;
-
-                       err_addr =  REG_GET_FIELD(mca_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
-                       InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
-
-                       dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
-                               mca_ipid,
-                               mcm_info->die_id,
-                               MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
-                               MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
-                               err_addr);
-
-                       umc_v12_0_convert_error_address(adev,
-                               err_data, err_addr,
-                               MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
-                               MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
-                               mcm_info->die_id);
-
-                       /* Clear umc error address content */
-                       memset(&err_node->err_info.err_addr,
-                               0, sizeof(err_node->err_info.err_addr));
+               list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) {
+                       mc_umc_status = mca_err_addr->err_status;
+                       if (mc_umc_status &&
+                               (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
+                                umc_v12_0_is_deferred_error(adev, mc_umc_status))) {
+                               uint64_t mca_addr, err_addr, mca_ipid;
+                               uint32_t InstanceIdLo;
+
+                               mca_addr = mca_err_addr->err_addr;
+                               mca_ipid = mca_err_addr->err_ipid;
+
+                               err_addr = REG_GET_FIELD(mca_addr,
+                                                       MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+                               InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo);
+
+                               dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n",
+                                       mca_ipid,
+                                       err_info->mcm_info.die_id,
+                                       MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+                                       MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+                                       err_addr);
+
+                               umc_v12_0_convert_error_address(adev,
+                                       err_data, err_addr,
+                                       MCA_IPID_LO_2_UMC_CH(InstanceIdLo),
+                                       MCA_IPID_LO_2_UMC_INST(InstanceIdLo),
+                                       err_info->mcm_info.die_id);
+                       }
+
+                       /* Delete error address node from list and free memory */
+                       amdgpu_ras_del_mca_err_addr(err_info, mca_err_addr);
                }
        }
 }
 
+static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
+                       enum amdgpu_mca_error_type type, void *ras_error_status)
+{
+       uint64_t mc_umc_status = *(uint64_t *)ras_error_status;
+
+       switch (type) {
+       case AMDGPU_MCA_ERROR_TYPE_UE:
+               return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
+       case AMDGPU_MCA_ERROR_TYPE_CE:
+               return umc_v12_0_is_correctable_error(adev, mc_umc_status);
+       case AMDGPU_MCA_ERROR_TYPE_DE:
+               return umc_v12_0_is_deferred_error(adev, mc_umc_status);
+       default:
+               return false;
+       }
+}
+
 static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
 {
        amdgpu_umc_loop_channels(adev,
@@ -444,12 +497,71 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
        .query_ras_error_address = umc_v12_0_query_ras_error_address,
 };
 
+static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type,
+                                             struct aca_bank_report *report, void *data)
+{
+       struct amdgpu_device *adev = handle->adev;
+       u64 status;
+       int ret;
+
+       ret = aca_bank_info_decode(bank, &report->info);
+       if (ret)
+               return ret;
+
+       status = bank->regs[ACA_REG_IDX_STATUS];
+       switch (type) {
+       case ACA_ERROR_TYPE_UE:
+               if (umc_v12_0_is_uncorrectable_error(adev, status))
+                       report->count[type] = 1;
+               break;
+       case ACA_ERROR_TYPE_CE:
+               if (umc_v12_0_is_correctable_error(adev, status))
+                       report->count[type] = 1;
+               break;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
+       .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report,
+};
+
+const struct aca_info umc_v12_0_aca_info = {
+       .hwip = ACA_HWIP_TYPE_UMC,
+       .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+       .bank_ops = &umc_v12_0_aca_bank_ops,
+};
+
+static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int ret;
+
+       ret = amdgpu_umc_ras_late_init(adev, ras_block);
+       if (ret)
+               return ret;
+
+       return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
+                                  &umc_v12_0_aca_info, NULL);
+}
+
 struct amdgpu_umc_ras umc_v12_0_ras = {
        .ras_block = {
                .hw_ops = &umc_v12_0_ras_hw_ops,
+               .ras_late_init = umc_v12_0_ras_late_init,
        },
        .err_cnt_init = umc_v12_0_err_cnt_init,
        .query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
        .ecc_info_query_ras_error_count = umc_v12_0_ecc_info_query_ras_error_count,
        .ecc_info_query_ras_error_address = umc_v12_0_ecc_info_query_ras_error_address,
+       .check_ecc_err_status = umc_v12_0_check_ecc_err_status,
 };
index e8de3a92251a2c0070345ce89c317032a386d45b..5973bfb14fceece0ef620d722f97230fe91cfc01 100644 (file)
                        (((_ipid_lo) >> 12) & 0xF))
 #define MCA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
 
+bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
 bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
 bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status);
 
+typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
+
 extern const uint32_t
        umc_v12_0_channel_idx_tbl[]
                        [UMC_V12_0_UMC_INSTANCE_NUM]
index 0d6b50528d7625e3a43ab27a6cd3c94810ba9c9c..97fa88ed770c0f6e27386a48919d860994efbd59 100644 (file)
@@ -25,7 +25,7 @@
 
 static void umc_v6_0_init_registers(struct amdgpu_device *adev)
 {
-       unsigned i,j;
+       unsigned i, j;
 
        for (i = 0; i < 4; i++)
                for (j = 0; j < 4; j++)
index cd8e459201f18ca0fd40dfa92c800c5a9271f6e4..002b08fa632f0e119858aca405e1cbea455265a4 100644 (file)
@@ -55,6 +55,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -64,6 +65,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
                /* Scalar L1 Instruction Cache (in SQC module) per bank */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -73,6 +75,7 @@ static struct kfd_gpu_cache_info kaveri_cache_info[] = {
                /* Scalar L1 Data Cache (in SQC module) per bank */
                .cache_size = 8,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -88,6 +91,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -95,8 +99,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
        },
        {
                /* Scalar L1 Instruction Cache (in SQC module) per bank */
-               .cache_size = 8,
+               .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -104,8 +109,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
        },
        {
                /* Scalar L1 Data Cache (in SQC module) per bank. */
-               .cache_size = 4,
+               .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -135,6 +141,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -144,6 +151,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -153,6 +161,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -162,6 +171,7 @@ static struct kfd_gpu_cache_info vega10_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 4096,
                .cache_level = 2,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -174,6 +184,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -183,6 +194,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -192,6 +204,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -201,6 +214,7 @@ static struct kfd_gpu_cache_info raven_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 1024,
                .cache_level = 2,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -213,6 +227,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -222,6 +237,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -231,6 +247,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -240,6 +257,7 @@ static struct kfd_gpu_cache_info renoir_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 1024,
                .cache_level = 2,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -252,6 +270,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -261,6 +280,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -270,6 +290,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -279,6 +300,7 @@ static struct kfd_gpu_cache_info vega12_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 2048,
                .cache_level = 2,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -291,6 +313,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -300,6 +323,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -309,6 +333,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -318,6 +343,7 @@ static struct kfd_gpu_cache_info vega20_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 8192,
                .cache_level = 2,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -330,6 +356,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -339,6 +366,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -348,6 +376,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -357,6 +386,7 @@ static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 8192,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -369,6 +399,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -378,6 +409,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -387,6 +419,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -396,6 +429,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -405,6 +439,7 @@ static struct kfd_gpu_cache_info navi10_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 4096,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -417,6 +452,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -426,6 +462,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -435,6 +472,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -444,6 +482,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -453,6 +492,7 @@ static struct kfd_gpu_cache_info vangogh_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 1024,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -465,6 +505,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -474,6 +515,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -483,6 +525,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -492,6 +535,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -501,6 +545,7 @@ static struct kfd_gpu_cache_info navi14_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 2048,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -513,6 +558,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -522,6 +568,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -531,6 +578,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -540,6 +588,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -549,6 +598,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 4096,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -558,6 +608,7 @@ static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
                /* L3 Data Cache per GPU */
                .cache_size = 128*1024,
                .cache_level = 3,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -570,6 +621,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -579,6 +631,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -588,6 +641,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -597,6 +651,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -606,6 +661,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 3072,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -615,6 +671,7 @@ static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
                /* L3 Data Cache per GPU */
                .cache_size = 96*1024,
                .cache_level = 3,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -627,6 +684,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -636,6 +694,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -645,6 +704,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -654,6 +714,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -663,6 +724,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 2048,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -672,6 +734,7 @@ static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
                /* L3 Data Cache per GPU */
                .cache_size = 32*1024,
                .cache_level = 3,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -684,6 +747,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -693,6 +757,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -702,6 +767,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -711,6 +777,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -720,6 +787,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 1024,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -729,6 +797,7 @@ static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
                /* L3 Data Cache per GPU */
                .cache_size = 16*1024,
                .cache_level = 3,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -741,6 +810,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -750,6 +820,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -759,6 +830,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -768,6 +840,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -777,6 +850,7 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 2048,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -789,6 +863,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -798,6 +873,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -807,6 +883,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -816,6 +893,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -825,6 +903,7 @@ static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 256,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -837,6 +916,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                          CRAT_CACHE_FLAGS_DATA_CACHE |
                          CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -846,6 +926,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                          CRAT_CACHE_FLAGS_INST_CACHE |
                          CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -855,6 +936,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                          CRAT_CACHE_FLAGS_DATA_CACHE |
                          CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -864,6 +946,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                          CRAT_CACHE_FLAGS_DATA_CACHE |
                          CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -873,6 +956,7 @@ static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 256,
                .cache_level = 2,
+               .cache_line_size = 128,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                          CRAT_CACHE_FLAGS_DATA_CACHE |
                          CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -885,6 +969,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -894,6 +979,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
                /* Scalar L1 Instruction Cache per SQC */
                .cache_size = 32,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -903,6 +989,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
                /* Scalar L1 Data Cache per SQC */
                .cache_size = 16,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -912,6 +999,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
                /* GL1 Data Cache per SA */
                .cache_size = 128,
                .cache_level = 1,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
@@ -921,6 +1009,7 @@ static struct kfd_gpu_cache_info dummy_cache_info[] = {
                /* L2 Data Cache per GPU (Total Tex Cache) */
                .cache_size = 2048,
                .cache_level = 2,
+               .cache_line_size = 64,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
index 74c2d7a0d6285715339482b975f946779def8e58..300634b9f66832dfd4b83e2cf341fcfc75df9a3a 100644 (file)
@@ -303,6 +303,7 @@ struct kfd_node;
 struct kfd_gpu_cache_info {
        uint32_t        cache_size;
        uint32_t        cache_level;
+       uint32_t        cache_line_size;
        uint32_t        flags;
        /* Indicates how many Compute Units share this cache
         * within a SA. Value = 1 indicates the cache is not shared
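For illustration, a single table entry shaped like the per-ASIC arrays above would populate the new cache_line_size field together with the existing ones; the values below are hypothetical, and num_cu_shared is the sharing-count field described by the comment above:

	{
		/* hypothetical L1 data cache: 16 KB, 128-byte lines, per CU */
		.cache_size = 16,
		.cache_level = 1,
		.cache_line_size = 128,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
			  CRAT_CACHE_FLAGS_DATA_CACHE |
			  CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,	/* not shared beyond one CU */
	},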
index 9ec750666382fe9bfebbc7708144f67a3fe480e6..d889e3545120a2aa2da5f15feae297c9fab3846e 100644 (file)
@@ -1018,12 +1018,14 @@ int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
                uint32_t *entry_size)
 {
        struct kfd_dbg_device_info_entry device_info;
-       uint32_t tmp_entry_size = *entry_size, tmp_num_devices;
+       uint32_t tmp_entry_size, tmp_num_devices;
        int i, r = 0;
 
        if (!(target && user_info && number_of_device_infos && entry_size))
                return -EINVAL;
 
+       tmp_entry_size = *entry_size;
+
        tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
        *number_of_device_infos = target->n_pdds;
        *entry_size = min_t(size_t, *entry_size, sizeof(device_info));
index 739721254a5dfff1b989b228dcb6cbcad3d52dd1..9b33d9d2c9ad533827befe8d5a53a8c62af041cb 100644 (file)
@@ -1285,8 +1285,10 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
        uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
        int user_gpu_id;
 
-       if (!p)
+       if (!p) {
+               dev_warn(dev->adev->dev, "Cannot find process with pasid:%d\n", pasid);
                return; /* Presumably process exited. */
+       }
 
        user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
        if (unlikely(user_gpu_id == -EINVAL)) {
@@ -1322,6 +1324,8 @@ void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
                }
        }
 
+       dev_warn(dev->adev->dev, "Sending SIGBUS to process %s (pasid:%d)\n",
+               p->lead_thread->comm, pasid);
        rcu_read_unlock();
 
        /* user application will handle SIGBUS signal */
index a7697ec8188e094a78807e1a6fcada06318af191..9a06c6fb6605851ae9c26ff4a81c66d358b9a69f 100644 (file)
@@ -132,6 +132,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
 static void event_interrupt_poison_consumption(struct kfd_node *dev,
                                uint16_t pasid, uint16_t client_id)
 {
+       enum amdgpu_ras_block block = 0;
        int old_poison, ret = -EINVAL;
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 
@@ -151,12 +152,14 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
        case SOC15_IH_CLIENTID_SE3SH:
        case SOC15_IH_CLIENTID_UTCL2:
                ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+               block = AMDGPU_RAS_BLOCK__GFX;
                break;
        case SOC15_IH_CLIENTID_SDMA0:
        case SOC15_IH_CLIENTID_SDMA1:
        case SOC15_IH_CLIENTID_SDMA2:
        case SOC15_IH_CLIENTID_SDMA3:
        case SOC15_IH_CLIENTID_SDMA4:
+               block = AMDGPU_RAS_BLOCK__SDMA;
                break;
        default:
                break;
@@ -171,12 +174,12 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev,
                dev_warn(dev->adev->dev,
                        "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
                        client_id);
-               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
        } else {
                dev_warn(dev->adev->dev,
                        "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
                        client_id);
-               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
        }
 }
 
index 2a65792fd1162ba3f21f1600724897ec78449c2f..7e2859736a558fe899c8d1bb438daa07523f2c59 100644 (file)
@@ -191,6 +191,7 @@ static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1)
 static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
                                uint16_t pasid, uint16_t source_id)
 {
+       enum amdgpu_ras_block block = 0;
        int ret = -EINVAL;
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 
@@ -210,9 +211,11 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
        case SOC15_INTSRC_SQ_INTERRUPT_MSG:
                if (dev->dqm->ops.reset_queues)
                        ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
+               block = AMDGPU_RAS_BLOCK__GFX;
                break;
        case SOC21_INTSRC_SDMA_ECC:
        default:
+               block = AMDGPU_RAS_BLOCK__GFX;
                break;
        }
 
@@ -221,9 +224,9 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev,
        /* if resetting the queue succeeds, do page retirement without gpu
           reset; if it fails, fall back to the gpu reset solution */
        if (!ret)
-               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
        else
-               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
 }
 
 static bool event_interrupt_isr_v11(struct kfd_node *dev,
index 27cdaea405017aed21ff447eec833068a9f9b101..91dd5e045b511d2aaa42aa4bd9b934e018c407ab 100644 (file)
@@ -143,6 +143,7 @@ enum SQ_INTERRUPT_ERROR_TYPE {
 static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
                                uint16_t pasid, uint16_t client_id)
 {
+       enum amdgpu_ras_block block = 0;
        int old_poison, ret = -EINVAL;
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 
@@ -162,12 +163,14 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
        case SOC15_IH_CLIENTID_SE3SH:
        case SOC15_IH_CLIENTID_UTCL2:
                ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
+               block = AMDGPU_RAS_BLOCK__GFX;
                break;
        case SOC15_IH_CLIENTID_SDMA0:
        case SOC15_IH_CLIENTID_SDMA1:
        case SOC15_IH_CLIENTID_SDMA2:
        case SOC15_IH_CLIENTID_SDMA3:
        case SOC15_IH_CLIENTID_SDMA4:
+               block = AMDGPU_RAS_BLOCK__SDMA;
                break;
        default:
                break;
@@ -182,12 +185,12 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
                dev_warn(dev->adev->dev,
                        "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
                        client_id);
-               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
+               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
        } else {
                dev_warn(dev->adev->dev,
                        "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
                        client_id);
-               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+               amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
        }
 }
 
index c50a0dc9c9c072f5692d003bce90aaaf13615c5d..f0f7f48af4137acb088e2e903f803c345babc3ec 100644 (file)
@@ -1515,9 +1515,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
                        goto unreserve_out;
                }
 
-               r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
-                                             drm_priv_to_vm(pdd->drm_priv),
-                                             svm_range_bo_validate, NULL);
+               r = amdgpu_vm_validate(pdd->dev->adev,
+                                      drm_priv_to_vm(pdd->drm_priv), NULL,
+                                      svm_range_bo_validate, NULL);
                if (r) {
                        pr_debug("failed %d validate pt bos\n", r);
                        goto unreserve_out;
@@ -1641,7 +1641,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                goto free_ctx;
        }
 
-       svm_range_reserve_bos(ctx, intr);
+       r = svm_range_reserve_bos(ctx, intr);
+       if (r)
+               goto free_ctx;
 
        p = container_of(prange->svms, struct kfd_process, svms);
        owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
index e5f7c92eebcbbfa6a1fda115ca2b599cab48e4e8..0136c27ef49f9a4af27d1da1074851bf0fec25d2 100644 (file)
@@ -1564,6 +1564,7 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
                pcache->processor_id_low = cu_processor_id + (first_active_cu - 1);
                pcache->cache_level = pcache_info[cache_type].cache_level;
                pcache->cache_size = pcache_info[cache_type].cache_size;
+               pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
 
                if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
                        pcache->cache_type |= HSA_CACHE_TYPE_DATA;
@@ -1632,6 +1633,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
                pcache->processor_id_low = cu_processor_id
                                        + (first_active_cu - 1);
                pcache->cache_level = pcache_info[cache_type].cache_level;
+               pcache->cacheline_size = pcache_info[cache_type].cache_line_size;
 
                if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
                        mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
@@ -1705,6 +1707,7 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
 
        gpu_processor_id = dev->node_props.simd_id_base;
 
+       memset(cache_info, 0, sizeof(cache_info));
        pcache_info = cache_info;
        num_of_cache_types = kfd_get_gpu_cache_info(kdev, &pcache_info);
        if (!num_of_cache_types) {
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
deleted file mode 100644 (file)
index a8a6c10..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-===============================================================================
-TODOs
-===============================================================================
-
-1. Base this on drm-next - WIP
-
-
-2. Cleanup commit history
-
-
-3. WIP - Drop page flip helper and use DRM's version
-
-
-4. DONE - Flatten all DC objects
-    * dc_stream/core_stream/stream should just be dc_stream
-    * Same for other DC objects
-
-    "Is there any major reason to keep all those abstractions?
-
-    Could you collapse everything into struct dc_stream?
-
-    I haven't looked recently but I didn't get the impression there was a
-    lot of design around what was public/protected, more whatever needed
-    to be used by someone else was in public."
-    ~ Dave Airlie
-
-
-5. DONE - Rename DC objects to align more with DRM
-    * dc_surface -> dc_plane_state
-    * dc_stream -> dc_stream_state
-
-
-6. DONE - Per-plane and per-stream validation
-
-
-7. WIP - Per-plane and per-stream commit
-
-
-8. WIP - Split pipe_ctx into plane and stream resource structs
-
-
-9. Attach plane and stream reources to state object instead of validate_context
-
-
-10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
-    * Use drm_display_info instead
-    * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
-
-    "Making sure you use the sink-specific helper libraries and kernel
-    subsystems, since there's really no good reason to have 2nd
-    implementation of those in the kernel. Looks likes that's done for mst
-    and edid parsing. There's still a bit a midlayer feeling to the edid
-    parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
-    think it'd be much better if you convert that over to reading stuff
-    from drm_display_info and if needed, push stuff into the core). Also,
-    I can't come up with a good reason why DC needs all this (except to
-    reimplement half of our edid quirk table, which really isn't a good
-    idea). Might be good if you put this onto the list of things to fix
-    long-term, but imo not a blocker. Definitely make sure new stuff
-    doesn't slip in (i.e. if you start adding edid quirks to DC instead of
-    the drm core, refactoring to use the core edid stuff was pointless)."
-    ~ Daniel Vetter
-
-
-11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
-overy complicated HW programming function for sendind and receiving i2c/aux
-commands. We can greatly simplify that and move it into dc/dceXYZ like other
-HW blocks.
-
-12. drm_modeset_lock in MST should no longer be needed in recent kernels
-    * Adopt appropriate locking scheme
-
-13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
-a few indirections, and consider removing entirely and using the
-drm_atomic_helper_best_encoder default behaviour.
-
-14. core/dc_debug.c, consider switching to the atomic state debug helpers and
-moving all your driver state printing into the various atomic_print_state
-callbacks. There's also plans to expose this stuff in a standard way across all
-drivers, to make debugging userspace compositors easier across different hw.
-
-15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
-dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
-
-16. Move to core SCDC helpers (I think those are new since initial DC review).
-
-17. There's still a pretty massive layer cake around dp aux and DPCD handling,
-with like 3 levels of abstraction and using your own structures instead of the
-stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
-incompatible styles, just means more reasons not to add a third (or well third
-one gets to do the cleanup refactor).
-
-18. There's a pile of sink handling code, both for DP and HDMI where I didn't
-immediately recognize the standard. I think long term it'd be best for the drm
-subsystem if we try to move as much of that into helpers/core as possible, and
-share it with drivers. But that's a very long term goal, and by far not just an
-issue with DC - other drivers, especially around DP sink handling, are equally
-guilty.
-
-19. DONE - The DC logger is still a rather sore thing, but I know that the
-DRM_DEBUG stuff just isn't up to the challenges either. We need to figure out
-something that integrates better with DRM and linux debug printing, while not
-being useless with filtering output. dynamic debug printing might be an option.
-
-20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
-retimer that we need to program to pass PHY compliance. Currently that's
-bypassing the i2c device and goes directly to HW. This should be changed.
-
-21. Remove vector.c from dc/basics. It's used in DDC code which can probably
-be simplified enough to no longer need a vector implementation.
index d292f290cd6ebbfe4a2174923470a2f60e1eadc7..467796d97313e215f12cb2640dbdb00731cd2a10 100644 (file)
@@ -67,6 +67,7 @@
 #include "amdgpu_dm_debugfs.h"
 #endif
 #include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
@@ -2121,6 +2122,16 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        const struct dmcub_firmware_header_v1_0 *hdr;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
+       static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_0_INST_CONST
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_1_STACK
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_2_BSS_DATA
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_3_VBIOS
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_4_MAILBOX
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_5_TRACEBUFF
+               DMUB_WINDOW_MEMORY_TYPE_FB,             //DMUB_WINDOW_6_FW_STATE
+               DMUB_WINDOW_MEMORY_TYPE_FB              //DMUB_WINDOW_7_SCRATCH_MEM
+       };
        int r;
 
        switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
@@ -2218,7 +2229,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                PSP_HEADER_BYTES;
-       region_params.is_mailbox_in_inbox = false;
+       region_params.window_memory_type = window_memory_type;
 
        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);
@@ -2246,6 +2257,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
        memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
        memory_params.region_info = &region_info;
+       memory_params.window_memory_type = window_memory_type;
 
        adev->dm.dmub_fb_info =
                kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@@ -4399,6 +4411,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
        bool psr_feature_enabled = false;
+       bool replay_feature_enabled = false;
        int max_overlay = dm->dc->caps.max_slave_planes;
 
        dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4510,6 +4523,23 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                }
        }
 
+       /* Determine whether to enable Replay support by default. */
+       if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+               case IP_VERSION(3, 1, 4):
+               case IP_VERSION(3, 1, 5):
+               case IP_VERSION(3, 1, 6):
+               case IP_VERSION(3, 2, 0):
+               case IP_VERSION(3, 2, 1):
+               case IP_VERSION(3, 5, 0):
+                       replay_feature_enabled = true;
+                       break;
+               default:
+                       replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+                       break;
+               }
+       }
+
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
@@ -4578,6 +4608,11 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                                amdgpu_dm_update_connector_after_detect(aconnector);
                                setup_backlight_device(dm, aconnector);
 
+                               /* Disable PSR if Replay can be enabled */
+                               if (replay_feature_enabled)
+                                       if (amdgpu_dm_set_replay_caps(link, aconnector))
+                                               psr_feature_enabled = false;
+
                                if (psr_feature_enabled)
                                        amdgpu_dm_set_psr_caps(link);
 
@@ -6402,10 +6437,81 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
        return ret;
 }
 
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (between 0 and 4, with 0 representing off), e.g. using the following::
+ *
+ *   # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications for color accuracy, so tread
+ * carefully.
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct drm_connector *connector = dev_get_drvdata(device);
+       struct drm_device *dev = connector->dev;
+       u8 val;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       val = to_dm_connector_state(connector->state)->abm_level ==
+               ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+               to_dm_connector_state(connector->state)->abm_level;
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t count)
+{
+       struct drm_connector *connector = dev_get_drvdata(device);
+       struct drm_device *dev = connector->dev;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 0, &val);
+
+       if (ret)
+               return ret;
+
+       if (val < 0 || val > 4)
+               return -EINVAL;
+
+       drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+       to_dm_connector_state(connector->state)->abm_level = val ?:
+               ABM_LEVEL_IMMEDIATE_DISABLE;
+       drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       drm_kms_helper_hotplug_event(dev);
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+       &dev_attr_panel_power_savings.attr,
+       NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+       .name = "amdgpu",
+       .attrs = amdgpu_attrs
+};
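A minimal userspace sketch of exercising this attribute, assuming the panel enumerates as card0-eDP-1 as in the DOC comment above: write a level and read it back. Note that a stored value of 0 maps internally to ABM_LEVEL_IMMEDIATE_DISABLE, which show() reports back as 0.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings";
	char buf[8];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "3", 1) != 1)	/* request level 3 (0 = off) */
		perror("write");
	lseek(fd, 0, SEEK_SET);		/* rewind before reading back */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("panel_power_savings = %s", buf);
	}
	close(fd);
	return 0;
}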
+
 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+               sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
        drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
 }
 
@@ -6507,6 +6613,13 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
                to_amdgpu_dm_connector(connector);
        int r;
 
+       if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+               r = sysfs_create_group(&connector->kdev->kobj,
+                                      &amdgpu_group);
+               if (r)
+                       return r;
+       }
+
        amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
 
        if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -8526,10 +8639,22 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        dm_update_pflip_irq_state(drm_to_adev(dev),
                                                  acrtc_attach);
 
-               if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                               acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
-                               !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
-                       amdgpu_dm_link_setup_psr(acrtc_state->stream);
+               if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+                       if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+                                       !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+                               struct amdgpu_dm_connector *aconn =
+                                       (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+                               amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+                       } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+                                       !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+                               struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+                                       acrtc_state->stream->dm_stream_context;
+
+                               if (!aconn->disallow_edp_enter_psr)
+                                       amdgpu_dm_link_setup_psr(acrtc_state->stream);
+                       }
+               }
 
                /* Decrement skip count when PSR is enabled and we're doing fast updates. */
                if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8556,6 +8681,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                            !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 #endif
                            !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+                           !aconn->disallow_edp_enter_psr &&
                            (timestamp_ns -
                            acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
                            500000000)
@@ -8818,11 +8944,12 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
                }
        } /* for_each_crtc_in_state() */
 
-       /* if there mode set or reset, disable eDP PSR */
+       /* if there is a mode set or reset, disable eDP PSR and Replay */
        if (mode_set_reset_required) {
                if (dm->vblank_control_workqueue)
                        flush_workqueue(dm->vblank_control_workqueue);
 
+               amdgpu_dm_replay_disable_all(dm);
                amdgpu_dm_psr_disable_all(dm);
        }
 
@@ -10731,11 +10858,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                }
 
-               ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
-               if (ret) {
-                       DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
-                       ret = -EINVAL;
-                       goto fail;
+               if (dc_resource_is_dsc_encoding_supported(dc)) {
+                       ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+                       if (ret) {
+                               DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+                               ret = -EINVAL;
+                               goto fail;
+                       }
                }
 
                ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
index 9c1871b866cc973091cd9a7bc5bc604201f6bd16..09519b7abf67ba64f19d78599ae46723ae7004dd 100644 (file)
@@ -693,6 +693,7 @@ struct amdgpu_dm_connector {
        struct drm_display_mode freesync_vid_base;
 
        int psr_skip_count;
+       bool disallow_edp_enter_psr;
 
        /* Record progress status of mst*/
        uint8_t mst_status;
index 6e715ef3a5566edb1f65bab544b2017dc176b7a3..e23a0a276e330d55be75a58f2771e9a335c84261 100644 (file)
@@ -29,6 +29,7 @@
 #include "dc.h"
 #include "amdgpu.h"
 #include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
 #include "amdgpu_dm_crtc.h"
 #include "amdgpu_dm_plane.h"
 #include "amdgpu_dm_trace.h"
@@ -95,6 +96,61 @@ bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
 }
 
+/**
+ * amdgpu_dm_crtc_set_panel_sr_feature() - Manage panel self-refresh features.
+ *
+ * @vblank_work:    pointer to a struct vblank_control_work object.
+ * @vblank_enabled: indicates whether the DRM vblank counter is currently
+ *                  enabled (true) or disabled (false).
+ * @allow_sr_entry: represents whether entry into the self-refresh mode is
+ *                  allowed (true) or not allowed (false).
+ *
+ * The DRM vblank counter enable/disable action is used as the trigger to enable
+ * or disable various panel self-refresh features:
+ *
+ * Panel Replay and PSR SU
+ * - Enable when:
+ *      - vblank counter is disabled
+ *      - entry is allowed: usermode demonstrates an adequate number of fast
+ *        commits
+ *      - CRC capture window isn't active
+ * - Keep enabled even when vblank counter gets enabled
+ *
+ * PSR1
+ * - Enable condition same as above
+ * - Disable when vblank counter is enabled
+ */
+static void amdgpu_dm_crtc_set_panel_sr_feature(
+       struct vblank_control_work *vblank_work,
+       bool vblank_enabled, bool allow_sr_entry)
+{
+       struct dc_link *link = vblank_work->stream->link;
+       bool is_sr_active = (link->replay_settings.replay_allow_active ||
+                                link->psr_settings.psr_allow_active);
+       bool is_crc_window_active = false;
+
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+       is_crc_window_active =
+               amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base);
+#endif
+
+       if (link->replay_settings.replay_feature_enabled &&
+               allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+               amdgpu_dm_replay_enable(vblank_work->stream, true);
+       } else if (vblank_enabled) {
+               if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active)
+                       amdgpu_dm_psr_disable(vblank_work->stream);
+       } else if (link->psr_settings.psr_feature_enabled &&
+               allow_sr_entry && !is_sr_active && !is_crc_window_active) {
+
+               struct amdgpu_dm_connector *aconn =
+                       (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context;
+
+               if (!aconn->disallow_edp_enter_psr)
+                       amdgpu_dm_psr_enable(vblank_work->stream);
+       }
+}
+
 static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
 {
        struct vblank_control_work *vblank_work =
@@ -123,18 +179,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
         * fill_dc_dirty_rects().
         */
        if (vblank_work->stream && vblank_work->stream->link) {
-               if (vblank_work->enable) {
-                       if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
-                           vblank_work->stream->link->psr_settings.psr_allow_active)
-                               amdgpu_dm_psr_disable(vblank_work->stream);
-               } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
-                          !vblank_work->stream->link->psr_settings.psr_allow_active &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-                          !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
-#endif
-                          vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
-                       amdgpu_dm_psr_enable(vblank_work->stream);
-               }
+               amdgpu_dm_crtc_set_panel_sr_feature(
+                       vblank_work, vblank_work->enable,
+                       vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
+                       vblank_work->stream->link->replay_settings.replay_feature_enabled);
        }
 
        mutex_unlock(&dm->dc_lock);
index 68a846323912768edea6c6d330a3491c3aff1b27..eee4945653e2d18d09f8bfc8175251499950f53c 100644 (file)
@@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
        const uint32_t rd_buf_size = 10;
        struct pipe_ctx *pipe_ctx;
        ssize_t result = 0;
-       int i, r, str_len = 30;
+       int i, r, str_len = 10;
 
        rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
 
@@ -2971,6 +2971,53 @@ static int allow_edp_hotplug_detection_set(void *data, u64 val)
        return 0;
 }
 
+/* check whether the kernel disallows eDP from entering the psr state
+ * cat /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter psr; 1: disallow
+ */
+static int disallow_edp_enter_psr_get(void *data, u64 *val)
+{
+       struct amdgpu_dm_connector *aconnector = data;
+
+       *val = (u64) aconnector->disallow_edp_enter_psr;
+       return 0;
+}
+
+/* set whether the kernel disallows eDP from entering the psr state
+ * echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 0: allow eDP to enter psr; 1: disallow
+ *
+ * usage: a test app reads crc from a PSR eDP rx.
+ *
+ * during kernel boot up, the kernel writes dpcd 0x170 = 5.
+ * this notifies the eDP rx that psr is enabled and lets the rx
+ * check crc. the rx fw then starts checking crc for the rx
+ * internal logic, but the crc read count within dpcd 0x246 is
+ * not updated and stays 0. when the eDP tx driver wants to read
+ * the rx crc from dpcd 0x246, 0x270, the zero read count makes
+ * the tx driver time out.
+ *
+ * to avoid this, this debugfs entry lets a test app disable
+ * rx crc checking for the rx internal logic. the test app can
+ * then read a non-zero crc read count.
+ *
+ * the expected app sequence is as below:
+ * 1. disable the eDP PHY and notify the eDP rx with dpcd 0x600 = 2.
+ * 2. echo 0x1 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ * 3. enable the eDP PHY and notify the eDP rx with dpcd 0x600 = 1,
+ *    but without dpcd 0x170 = 5.
+ * 4. read crc from rx dpcd 0x270, 0x246, etc.
+ * 5. echo 0x0 > /sys/kernel/debug/dri/0/eDP-X/disallow_edp_enter_psr
+ *    this lets eDP go back to normal with the psr setup dpcd 0x170 = 5.
+ */
+static int disallow_edp_enter_psr_set(void *data, u64 val)
+{
+       struct amdgpu_dm_connector *aconnector = data;
+
+       aconnector->disallow_edp_enter_psr = val ? true : false;
+       return 0;
+}
+
 static int dmub_trace_mask_set(void *data, u64 val)
 {
        struct amdgpu_device *adev = data;
@@ -3092,6 +3139,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops,
                        allow_edp_hotplug_detection_get,
                        allow_edp_hotplug_detection_set, "%llu\n");
 
+DEFINE_DEBUGFS_ATTRIBUTE(disallow_edp_enter_psr_fops,
+                       disallow_edp_enter_psr_get,
+                       disallow_edp_enter_psr_set, "%llu\n");
+
 DEFINE_SHOW_ATTRIBUTE(current_backlight);
 DEFINE_SHOW_ATTRIBUTE(target_backlight);
 
@@ -3265,6 +3316,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
                                        &edp_ilr_debugfs_fops);
                debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector,
                                        &allow_edp_hotplug_detection_fops);
+               debugfs_create_file("disallow_edp_enter_psr", 0644, dir, connector,
+                                       &disallow_edp_enter_psr_fops);
        }
 
        for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
index 5ce542b1f8604202be02ca4cac07b041b809aa4e..738a58eebba780fc7f846b29687a065fbdb80107 100644 (file)
@@ -60,21 +60,26 @@ static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connecto
        if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT)
                return false;
 
+       // Sink shall populate line deviation information
+       if (dpcd_caps->pr_info.pixel_deviation_per_line == 0 ||
+               dpcd_caps->pr_info.max_deviation_line == 0)
+               return false;
+
        return true;
 }
 
 /*
- * amdgpu_dm_setup_replay() - setup replay configuration
+ * amdgpu_dm_set_replay_caps() - setup Replay capabilities
  * @link: link
  * @aconnector: aconnector
  *
  */
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
 {
-       struct replay_config pr_config;
+       struct replay_config pr_config = { 0 };
        union replay_debug_flags *debug_flags = NULL;
 
-       // For eDP, if Replay is supported, return true to skip checks
+       // If Replay support is already set, return true to skip the checks
        if (link->replay_settings.config.replay_supported)
                return true;
 
@@ -87,27 +92,50 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
        if (!link_supports_replay(link, aconnector))
                return false;
 
-       // Mark Replay is supported in link and update related attributes
+       // Mark Replay as supported in pr_config
        pr_config.replay_supported = true;
-       pr_config.replay_power_opt_supported = 0;
-       pr_config.replay_enable_option |= pr_enable_option_static_screen;
-       pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
-
-       if (!pr_config.replay_timing_sync_supported)
-               pr_config.replay_enable_option &= ~pr_enable_option_general_ui;
 
        debug_flags = (union replay_debug_flags *)&pr_config.debug_flags;
        debug_flags->u32All = 0;
        debug_flags->bitfields.visual_confirm =
                link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY;
 
-       link->replay_settings.replay_feature_enabled = true;
-
        init_replay_config(link, &pr_config);
 
        return true;
 }
 
+/*
+ * amdgpu_dm_link_setup_replay() - configure replay link
+ * @link: link
+ * @aconnector: aconnector
+ *
+ */
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector)
+{
+       struct replay_config *pr_config;
+
+       if (link == NULL || aconnector == NULL)
+               return false;
+
+       pr_config = &link->replay_settings.config;
+
+       if (!pr_config->replay_supported)
+               return false;
+
+       pr_config->replay_power_opt_supported = 0x11;
+       pr_config->replay_smu_opt_supported = false;
+       pr_config->replay_enable_option |= pr_enable_option_static_screen;
+       pr_config->replay_support_fast_resync_in_ultra_sleep_mode = aconnector->max_vfreq >= 2 * aconnector->min_vfreq;
+       pr_config->replay_timing_sync_supported = false;
+
+       if (!pr_config->replay_timing_sync_supported)
+               pr_config->replay_enable_option &= ~pr_enable_option_general_ui;
+
+       link->replay_settings.replay_feature_enabled = true;
+
+       return true;
+}
 
 /*
  * amdgpu_dm_replay_enable() - enable replay f/w
@@ -117,51 +145,23 @@ bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *ac
  */
 bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
 {
-       uint64_t state;
-       unsigned int retry_count;
        bool replay_active = true;
-       const unsigned int max_retry = 1000;
-       bool force_static = true;
        struct dc_link *link = NULL;
 
-
        if (stream == NULL)
                return false;
 
        link = stream->link;
 
-       if (link == NULL)
-               return false;
-
-       link->dc->link_srv->edp_setup_replay(link, stream);
-
-       link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL);
-
-       link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL);
-
-       if (wait == true) {
-
-               for (retry_count = 0; retry_count <= max_retry; retry_count++) {
-                       dc_link_get_replay_state(link, &state);
-                       if (replay_active) {
-                               if (state != REPLAY_STATE_0 &&
-                                       (!force_static || state == REPLAY_STATE_3))
-                                       break;
-                       } else {
-                               if (state == REPLAY_STATE_0)
-                                       break;
-                       }
-                       udelay(500);
-               }
-
-               /* assert if max retry hit */
-               if (retry_count >= max_retry)
-                       ASSERT(0);
-       } else {
-               /* To-do: Add trace log */
+       if (link) {
+               link->dc->link_srv->edp_setup_replay(link, stream);
+               link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+               DRM_DEBUG_DRIVER("Enabling replay...\n");
+               link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
+               return true;
        }
 
-       return true;
+       return false;
 }
 
 /*
@@ -172,12 +172,31 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
  */
 bool amdgpu_dm_replay_disable(struct dc_stream_state *stream)
 {
+       bool replay_active = false;
+       struct dc_link *link = NULL;
 
-       if (stream->link) {
+       if (stream == NULL)
+               return false;
+
+       link = stream->link;
+
+       if (link) {
                DRM_DEBUG_DRIVER("Disabling replay...\n");
-               stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL);
+               link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, true, false, NULL);
                return true;
        }
 
        return false;
 }
+
+/*
+ * amdgpu_dm_replay_disable_all() - disable replay f/w
+ * if replay is enabled on any stream
+ *
+ * Return: true if success
+ */
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm)
+{
+       DRM_DEBUG_DRIVER("Disabling replay if replay is enabled on any stream\n");
+       return dc_set_replay_allow_active(dm->dc, false);
+}
index 01cba3cd62463a2a6ea172383e7fbfebf7d5419a..f0d30eb473126cd0ccf5fa38768c693e70d403b8 100644 (file)
@@ -40,7 +40,9 @@ enum replay_enable_option {
 
 
 bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool enable);
-bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_set_replay_caps(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_link_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
 bool amdgpu_dm_replay_disable(struct dc_stream_state *stream);
+bool amdgpu_dm_replay_disable_all(struct amdgpu_display_manager *dm);
 
 #endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */
index 1090d235086aca2f99af4c5f7f5483574b7813af..bd1f60ecaba4f83bc3ecc58bb368db52c3cd6ea9 100644 (file)
@@ -101,6 +101,40 @@ void convert_float_matrix(
        }
 }
 
+static struct fixed31_32 int_frac_to_fixed_point(uint16_t arg,
+                                                uint8_t integer_bits,
+                                                uint8_t fractional_bits)
+{
+       struct fixed31_32 result;
+       uint16_t sign_mask = 1 << (fractional_bits + integer_bits);
+       uint16_t value_mask = sign_mask - 1;
+
+       result.value = (long long)(arg & value_mask) <<
+                      (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+
+       if (arg & sign_mask)
+               result = dc_fixpt_neg(result);
+
+       return result;
+}
+
+/**
+ * convert_hw_matrix - converts HW values into fixed31_32 matrix.
+ * @matrix: fixed point 31.32 matrix
+ * @reg: array of register values
+ * @buffer_size: size of the array of register values
+ *
+ * Converts HW register spec defined format S2D13 into a fixed-point 31.32
+ * matrix.
+ */
+void convert_hw_matrix(struct fixed31_32 *matrix,
+                      uint16_t *reg,
+                      uint32_t buffer_size)
+{
+       for (int i = 0; i < buffer_size; ++i)
+               matrix[i] = int_frac_to_fixed_point(reg[i], 2, 13);
+}
+
 static uint32_t find_gcd(uint32_t a, uint32_t b)
 {
        uint32_t remainder;
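A standalone worked example of the S2D13 decode above may help (the helper here is illustrative, not part of the patch): with 2 integer and 13 fractional bits, bit 15 is the sign and the low 15 bits are a plain 2.13 magnitude, exactly as int_frac_to_fixed_point() treats them.

#include <stdint.h>
#include <stdio.h>

/* mirrors the sign-magnitude layout handled above, using a double
 * for easy inspection instead of struct fixed31_32 */
static double s2d13_to_double(uint16_t arg)
{
	uint16_t sign_mask = 1u << 15;
	double v = (double)(arg & (sign_mask - 1)) / (double)(1 << 13);

	return (arg & sign_mask) ? -v : v;
}

int main(void)
{
	printf("%f\n", s2d13_to_double(0x2000)); /* +1.000000 */
	printf("%f\n", s2d13_to_double(0xA000)); /* -1.000000 */
	printf("%f\n", s2d13_to_double(0x0800)); /*  0.250000 */
	return 0;
}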
index 81da4e6f7a1acb074c02d719b068661b1c06db81..a433cef78496f22e8207f283e32f3d46cec8b058 100644 (file)
@@ -41,6 +41,10 @@ void convert_float_matrix(
 void reduce_fraction(uint32_t num, uint32_t den,
                uint32_t *out_num, uint32_t *out_den);
 
+void convert_hw_matrix(struct fixed31_32 *matrix,
+                      uint16_t *reg,
+                      uint32_t buffer_size);
+
 static inline unsigned int log_2(unsigned int num)
 {
        return ilog2(num);
index 818a529cacc37382e6598604270cd06051255a93..86f9198e7501185048f582be4962c2597772ded1 100644 (file)
@@ -37,7 +37,7 @@
 #define EXEC_BIOS_CMD_TABLE(command, params)\
        (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
                GetIndexIntoMasterTable(COMMAND, command), \
-               (uint32_t *)&params) == 0)
+               (uint32_t *)&params, sizeof(params)) == 0)
 
 #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
        amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
index 293a919d605d1657d3963ed292cd4c3115d3fb3f..cbae1be7b0093cfa76f4263055f90b4e9b358a64 100644 (file)
@@ -49,7 +49,7 @@
 #define EXEC_BIOS_CMD_TABLE(fname, params)\
        (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
                GET_INDEX_INTO_MASTER_TABLE(command, fname), \
-               (uint32_t *)&params) == 0)
+               (uint32_t *)&params, sizeof(params)) == 0)
 
 #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
        amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
index 0c6a4ab72b1d298e00a856c640e936fac2f89a92..e3e1940198a9c55f6ad036fa411d2cf9226c1173 100644 (file)
@@ -707,9 +707,7 @@ void rn_clk_mgr_construct(
        int is_green_sardine = 0;
        struct clk_log_info log_info = {0};
 
-#if defined(CONFIG_DRM_AMD_DC_FP)
        is_green_sardine = ASICREV_IS_GREEN_SARDINE(ctx->asic_id.hw_internal_rev);
-#endif
 
        clk_mgr->base.ctx = ctx;
        clk_mgr->base.funcs = &dcn21_funcs;
index 8c9d45e5b13b8399984cf35c7b22e486d74d4552..d72acbb049b1ba33bd0a3bbdd2d37c4dff12ea52 100644 (file)
@@ -185,10 +185,6 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
-#ifdef DBG
-       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
        return actual_dcfclk_set_mhz * 1000;
 }
 
index e4f96b6fd79d0189896d50f94ab5eb70297db7dc..19e5b3be92757ce8a59bfb951341d3b3e607c699 100644 (file)
@@ -180,10 +180,6 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
-#ifdef DBG
-       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
        return actual_dcfclk_set_mhz * 1000;
 }
 
index 32279c5db72483f24106a5b282622903baaaa6ac..6904e95113c12ab243fa878e941e6a1d7c033e4a 100644 (file)
@@ -202,10 +202,6 @@ int dcn31_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int requeste
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
-#ifdef DBG
-       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
        return actual_dcfclk_set_mhz * 1000;
 }
 
index 07baa10a86473d64ed06efbb5d0b3ecf92af8aa2..c4af406146b7731fd2dda0a2c395ba5f2792d369 100644 (file)
@@ -220,12 +220,6 @@ int dcn314_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
-#ifdef DBG
-       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n",
-                       actual_dcfclk_set_mhz,
-                       actual_dcfclk_set_mhz * 1000);
-#endif
-
        return actual_dcfclk_set_mhz * 1000;
 }
 
index 1042cf1a3ab04b9fb5d8e4cbe0283c4a06148a44..879f1494c4cd9d04ea400c6f9bea0104174d7b98 100644 (file)
@@ -215,10 +215,6 @@ int dcn315_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
-#ifdef DBG
-       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
        return actual_dcfclk_set_mhz * 1000;
 }
 
index 3ed19197a75583cb636ead09425ef2a352f5042c..8b82092b91cd9d42d8d7edeca4ce100d8a1c256e 100644 (file)
@@ -189,10 +189,6 @@ int dcn316_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
                        khz_to_mhz_ceil(requested_dcfclk_khz));
 
-#ifdef DBG
-       smu_print("actual_dcfclk_set_mhz %d is set to : %d\n", actual_dcfclk_set_mhz, actual_dcfclk_set_mhz * 1000);
-#endif
-
        return actual_dcfclk_set_mhz * 1000;
 }
 
index aadd07bc68c5dba969fb4f7e158ba1ea990c7b5a..e64e45e4c8338d68ecc672776aca6afc47c1816b 100644 (file)
@@ -387,7 +387,15 @@ static void dcn32_update_clocks_update_dentist(
                uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
 
                if (clk_mgr->smu_present)
-                       dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+                       /*
+                        * SMU uses discrete dispclk presets. We apply
+                        * the same formula to raise our dispclk_khz
+                        * to the next matching discrete value. By
+                        * contract, we should use the preset dispclk
+                        * floored in MHz to describe the intended clock.
+                        */
+                       dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+                                       khz_to_mhz_floor(temp_dispclk_khz));
 
                if (dc->debug.override_dispclk_programming) {
                        REG_GET(DENTIST_DISPCLK_CNTL,
@@ -426,7 +434,15 @@ static void dcn32_update_clocks_update_dentist(
 
        /* do requested DISPCLK updates*/
        if (clk_mgr->smu_present)
-               dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+               /*
+                * SMU uses discrete dispclk presets. We applied
+                * the same formula to increase our dispclk_khz
+                * to the next matching discrete value. By
+                * contract, we should use the preset dispclk
+                * floored in MHz to describe the intended clock.
+                */
+               dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK,
+                               khz_to_mhz_floor(clk_mgr->base.clks.dispclk_khz));
 
        if (dc->debug.override_dispclk_programming) {
                REG_GET(DENTIST_DISPCLK_CNTL,
@@ -493,6 +509,8 @@ static void dcn32_auto_dpm_test_log(
                }
        }
 
+       msleep(5);
+
        mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
 
     dispclk_khz_reg    = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
@@ -734,7 +752,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
                clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
 
                if (clk_mgr->smu_present && !dpp_clock_lowered)
-                       dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+                       /*
+                        * SMU uses discrete dppclk presets. We applied
+                        * the same formula to increase our dppclk_khz
+                        * to the next matching discrete value. By
+                        * contract, we should use the preset dppclk
+                        * floored in MHz to describe the intended clock.
+                        */
+                       dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+                                       khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
 
                update_dppclk = true;
        }
@@ -765,7 +791,15 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
                        dcn32_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
                        dcn32_update_clocks_update_dentist(clk_mgr, context);
                        if (clk_mgr->smu_present)
-                               dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
+                               /*
+                                * SMU uses discrete dppclk presets. We applied
+                                * the same formula to increase our dppclk_khz
+                                * to the next matching discrete value. By
+                                * contract, we should use the preset dppclk
+                                * floored in MHz to describe the intended clock.
+                                */
+                               dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK,
+                                               khz_to_mhz_floor(clk_mgr_base->clks.dppclk_khz));
                } else {
                        /* if clock is being raised, increase refclk before lowering DTO */
                        if (update_dppclk || update_dispclk)
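
For reference, a minimal sketch of the kHz-to-MHz helpers these hunks switch between (assumed shape; the real definitions live in the clk_mgr headers, which are not part of this diff):

        /* Sketch, assuming integer kHz inputs. */
        static inline int khz_to_mhz_ceil(int khz)
        {
                return (khz + 999) / 1000;  /* round up to the next MHz preset */
        }

        static inline int khz_to_mhz_floor(int khz)
        {
                return khz / 1000;          /* round down to the granted preset */
        }

Flooring avoids asking the SMU for a preset one step above the one the divider math actually produced.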
index a34c258c19dc20e492d6579d84e3ab0d04d67ae6..c76352a817de52310fbc4a56c7cfebff37f965f4 100644 (file)
@@ -36,8 +36,7 @@
 #define DALSMC_MSG_SetCabForUclkPstate 0x12
 #define DALSMC_Result_OK                               0x1
 
-void
-dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
+void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable);
 void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
 void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
 void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
index 14cec1c7b718c4ab48fbd9588f4b6465c13897cd..06edca50a8fa1cbe7bcbda2cad48bcf2bbb6ea39 100644 (file)
@@ -384,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
        dcn35_smu_enable_pme_wa(clk_mgr);
 }
 
-void dcn35_init_clocks(struct clk_mgr *clk_mgr)
-{
-       uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
-
-       memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-
-       // Assumption is that boot state always supports pstate
-       clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;      // restore ref_dtbclk
-       clk_mgr->clks.p_state_change_support = true;
-       clk_mgr->clks.prev_p_state_change_support = true;
-       clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
-       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
 
 bool dcn35_are_clock_states_equal(struct dc_clocks *a,
                struct dc_clocks *b)
@@ -422,6 +409,23 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
 {
 }
 
+static void init_clk_states(struct clk_mgr *clk_mgr)
+{
+       uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+       memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+       clk_mgr->clks.dtbclk_en = true;
+       clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;      // restore ref_dtbclk
+       clk_mgr->clks.p_state_change_support = true;
+       clk_mgr->clks.prev_p_state_change_support = true;
+       clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+       clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+       init_clk_states(clk_mgr);
+}
+
 static struct clk_bw_params dcn35_bw_params = {
        .vram_type = Ddr4MemType,
        .num_channels = 1,
@@ -826,7 +830,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
        }
 }
 
-static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
+static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        struct dc *dc = clk_mgr_base->ctx->dc;
@@ -874,7 +878,7 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
        return ips_supported;
 }
 
-static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
+static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 
@@ -883,7 +887,7 @@ static uint32_t dcn35_get_idle_state(struct clk_mgr *clk_mgr_base)
 
 static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
 {
-       dcn35_init_clocks(clk_mgr);
+       init_clk_states(clk_mgr);
 
 /* TODO: Implement the functions and remove the ifndef guard */
 }
@@ -968,8 +972,8 @@ static struct clk_mgr_funcs dcn35_funcs = {
        .set_low_power_state = dcn35_set_low_power_state,
        .exit_low_power_state = dcn35_exit_low_power_state,
        .is_ips_supported = dcn35_is_ips_supported,
-       .set_idle_state = dcn35_set_idle_state,
-       .get_idle_state = dcn35_get_idle_state
+       .set_idle_state = dcn35_set_ips_idle_state,
+       .get_idle_state = dcn35_get_ips_idle_state
 };
 
 struct clk_mgr_funcs dcn35_fpga_funcs = {
index 6d4a1ffab5ed9d7ed1f82830eee45e818851f6ec..a07f7e685d2890dc7aa50fa0cda8fc6256c156c0 100644 (file)
@@ -447,6 +447,9 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
 
 void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
 {
+       if (!clk_mgr->smu_present)
+               return;
+
        dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
@@ -458,6 +461,9 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
 {
        int retv;
 
+       if (!clk_mgr->smu_present)
+               return 0;
+
        retv = dcn35_smu_send_msg_with_param(
                clk_mgr,
                VBIOSSMC_MSG_DispPsrExit,
@@ -470,6 +476,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
 {
        int retv;
 
+       if (!clk_mgr->smu_present)
+               return 0;
+
        retv = dcn35_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_QueryIPS2Support,
@@ -481,6 +490,9 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
 
 void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
 {
+       if (!clk_mgr->smu_present)
+               return;
+
        REG_WRITE(MP1_SMN_C2PMSG_71, param);
        //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
 }
@@ -489,6 +501,9 @@ uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
 {
        uint32_t retv;
 
+       if (!clk_mgr->smu_present)
+               return 0;
+
        retv = REG_READ(MP1_SMN_C2PMSG_71);
        //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n",  __func__, retv);
        return retv;
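
All of the dcn35 SMU entry points above now share the same early-out guard. A minimal sketch of the pattern, assuming getters should report a neutral value when no SMU firmware is present:

        if (!clk_mgr->smu_present)
                return 0;   /* void setters simply return; getters return 0 */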
index aa7c02ba948e9ce63aa84eb7518f9c73c80d107a..72512903f88f76c126bc958f9982591255d9952f 100644 (file)
@@ -414,6 +414,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                if (dc->optimized_required || dc->wm_optimized_required)
                        return false;
 
+       dc_exit_ips_for_hw_access(dc);
+
        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -454,6 +456,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
 
        int i = 0;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
@@ -484,6 +488,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
        bool ret = false;
        struct crtc_position position;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];
@@ -603,6 +609,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
        if (pipe == NULL)
                return false;
 
+       dc_exit_ips_for_hw_access(dc);
+
        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
@@ -662,6 +670,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
        struct pipe_ctx *pipe;
        struct timing_generator *tg;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
@@ -686,6 +696,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
        int i;
        struct pipe_ctx *pipe_ctx;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
@@ -721,6 +733,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
        if (option > DITHER_OPTION_MAX)
                return;
 
+       dc_exit_ips_for_hw_access(stream->ctx->dc);
+
        stream->dither_option = option;
 
        memset(&params, 0, sizeof(params));
@@ -745,6 +759,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
        bool ret = false;
        struct pipe_ctx *pipes;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -762,6 +778,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
        bool ret = false;
        struct pipe_ctx *pipes;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
@@ -788,6 +806,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];
 
@@ -1766,6 +1786,8 @@ void dc_enable_stereo(
        int i, j;
        struct pipe_ctx *pipe;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                if (context != NULL) {
                        pipe = &context->res_ctx.pipe_ctx[i];
@@ -1785,6 +1807,8 @@ void dc_enable_stereo(
 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
 {
        if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+               dc_exit_ips_for_hw_access(dc);
+
                enable_timing_multisync(dc, context);
                program_timing_sync(dc, context);
        }
@@ -2041,6 +2065,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
        if (!streams_changed(dc, streams, stream_count))
                return res;
 
+       dc_exit_ips_for_hw_access(dc);
+
        DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
 
        for (i = 0; i < stream_count; i++) {
@@ -3067,6 +3093,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
 
                        if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
                                resource_build_test_pattern_params(&context->res_ctx, otg_master);
+
+                       if (otg_master && (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
+                                       otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420))
+                               resource_build_subsampling_params(&context->res_ctx, otg_master);
                }
        }
 
@@ -3376,6 +3406,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
        int i, j;
        struct pipe_ctx *top_pipe_to_program = NULL;
        struct dc_stream_status *stream_status = NULL;
+       dc_exit_ips_for_hw_access(dc);
+
        dc_z10_restore(dc);
 
        top_pipe_to_program = resource_get_otg_master_for_stream(
@@ -3503,10 +3535,23 @@ static void commit_planes_for_stream(struct dc *dc,
        // dc->current_state anymore, so we have to cache it before we apply
        // the new SubVP context
        subvp_prev_use = false;
+       dc_exit_ips_for_hw_access(dc);
+
        dc_z10_restore(dc);
        if (update_type == UPDATE_TYPE_FULL)
                wait_for_outstanding_hw_updates(dc, context);
 
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream && pipe->plane_state) {
+                       set_p_state_switch_method(dc, context, pipe);
+
+                       if (dc->debug.visual_confirm)
+                               dc_update_visual_confirm_color(dc, context, pipe);
+               }
+       }
+
        if (update_type == UPDATE_TYPE_FULL) {
                dc_allow_idle_optimizations(dc, false);
 
@@ -3541,17 +3586,6 @@ static void commit_planes_for_stream(struct dc *dc,
                }
        }
 
-       for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (pipe->stream && pipe->plane_state) {
-                       set_p_state_switch_method(dc, context, pipe);
-
-                       if (dc->debug.visual_confirm)
-                               dc_update_visual_confirm_color(dc, context, pipe);
-               }
-       }
-
        if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
                struct pipe_ctx *mpcc_pipe;
                struct pipe_ctx *odm_pipe;
@@ -3817,7 +3851,9 @@ static void commit_planes_for_stream(struct dc *dc,
                 * programming has completed (we turn on phantom OTG in order
                 * to complete the plane disable for phantom pipes).
                 */
-               dc->hwss.apply_ctx_to_hw(dc, context);
+
+               if (dc->hwss.disable_phantom_streams)
+                       dc->hwss.disable_phantom_streams(dc, context);
        }
 
        if (update_type != UPDATE_TYPE_FAST)
@@ -4382,6 +4418,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
        bool is_plane_addition = 0;
        bool is_fast_update_only;
 
+       dc_exit_ips_for_hw_access(dc);
+
        populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
        is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
                        surface_count, stream_update, stream);
@@ -4502,6 +4540,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
        int i, j;
        struct dc_fast_update fast_update[MAX_SURFACES] = {0};
 
+       dc_exit_ips_for_hw_access(dc);
+
        populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
        stream_status = dc_stream_get_status(stream);
        context = dc->current_state;
@@ -4686,6 +4726,8 @@ void dc_set_power_state(
        case DC_ACPI_CM_POWER_STATE_D0:
                dc_state_construct(dc, dc->current_state);
 
+               dc_exit_ips_for_hw_access(dc);
+
                dc_z10_restore(dc);
 
                dc->hwss.init_hw(dc);
@@ -4827,6 +4869,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
                dc->idle_optimizations_allowed = allow;
 }
 
+void dc_exit_ips_for_hw_access(struct dc *dc)
+{
+       if (dc->caps.ips_support)
+               dc_allow_idle_optimizations(dc, false);
+}
+
 bool dc_dmub_is_ips_idle_state(struct dc *dc)
 {
        uint32_t idle_state = 0;
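
The new helper backs every dc_exit_ips_for_hw_access() call added across dc.c and dc_stream.c in this series. A hedged sketch of the call-site pattern (the function name here is hypothetical):

        static bool example_hw_query(struct dc *dc)
        {
                /* Exit IPS (Idle Power States) before any register access;
                 * a no-op when dc->caps.ips_support is false. */
                dc_exit_ips_for_hw_access(dc);

                /* ... display registers are safe to touch from here ... */
                return true;
        }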
index 9fbdb09697fd5ea16abe86e4f970e80fb764ff7f..96ea283bd16904d955ea55cf4e4992bc1a41b4c5 100644 (file)
@@ -822,6 +822,16 @@ static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ct
                        stream->timing.v_border_bottom +
                        stream->timing.v_border_top;
 
+       /* Recout for ODM slices after the first slice needs one extra left edge pixel
+        * for 3-tap chroma subsampling.
+        */
+       if (odm_slice_idx > 0 &&
+                       (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
+                               pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)) {
+               odm_rec.x -= 1;
+               odm_rec.width += 1;
+       }
+
        return odm_rec;
 }
 
@@ -1438,6 +1448,7 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
        enum controller_dp_test_pattern controller_test_pattern;
        enum controller_dp_color_space controller_color_space;
        enum dc_color_depth color_depth = otg_master->stream->timing.display_color_depth;
+       enum dc_pixel_encoding pixel_encoding = otg_master->stream->timing.pixel_encoding;
        int h_active = otg_master->stream->timing.h_addressable +
                otg_master->stream->timing.h_border_left +
                otg_master->stream->timing.h_border_right;
@@ -1469,10 +1480,36 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx,
                else
                        params->width = last_odm_slice_width;
 
+               /* Extra left edge pixel is required for 3-tap chroma subsampling. */
+               if (i != 0 && (pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
+                               pixel_encoding == PIXEL_ENCODING_YCBCR420)) {
+                       params->offset -= 1;
+                       params->width += 1;
+               }
+
                offset += odm_slice_width;
        }
 }
 
+void resource_build_subsampling_params(struct resource_context *res_ctx,
+       struct pipe_ctx *otg_master)
+{
+       struct pipe_ctx *opp_heads[MAX_PIPES];
+       int odm_cnt = 1;
+       int i;
+
+       odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads);
+
+       /* For ODM slices after the first slice, extra left edge pixel is required
+        * for 3-tap chroma subsampling.
+        */
+       if (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 ||
+                       otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+               for (i = 0; i < odm_cnt; i++)
+                       opp_heads[i]->stream_res.left_edge_extra_pixel = (i != 0);
+       }
+}
+
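
A worked example of the adjustment, with assumed numbers: a 3840-pixel-wide timing split into four ODM slices with YCbCr 4:2:0 output. Slice 0 keeps x = 0, width = 960; slices 1-3 each shift left by one pixel and widen by one (x = 959/1919/2879, width = 961) so the 3-tap chroma filter sees its left-hand neighbor, and left_edge_extra_pixel is set for every opp head except the first.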
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -1834,23 +1871,6 @@ int resource_find_any_free_pipe(struct resource_context *new_res_ctx,
 
 bool resource_is_pipe_type(const struct pipe_ctx *pipe_ctx, enum pipe_type type)
 {
-#ifdef DBG
-       if (pipe_ctx->stream == NULL) {
-               /* a free pipe with dangling states */
-               ASSERT(!pipe_ctx->plane_state);
-               ASSERT(!pipe_ctx->prev_odm_pipe);
-               ASSERT(!pipe_ctx->next_odm_pipe);
-               ASSERT(!pipe_ctx->top_pipe);
-               ASSERT(!pipe_ctx->bottom_pipe);
-       } else if (pipe_ctx->top_pipe) {
-               /* a secondary DPP pipe must be signed to a plane */
-               ASSERT(pipe_ctx->plane_state)
-       }
-       /* Add more checks here to prevent corrupted pipe ctx. It is very hard
-        * to debug this issue afterwards because we can't pinpoint the code
-        * location causing inconsistent pipe context states.
-        */
-#endif
        switch (type) {
        case OTG_MASTER:
                return !pipe_ctx->prev_odm_pipe &&
index 88c6436b28b69ca7f4791bdc47404cd5f73a5f83..180ac47868c22a68c1af47096db95ecf6b11994c 100644 (file)
@@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state)
                dc_stream_release(state->phantom_streams[i]);
                state->phantom_streams[i] = NULL;
        }
+       state->phantom_stream_count = 0;
 
        for (i = 0; i < state->phantom_plane_count; i++) {
                dc_plane_state_release(state->phantom_planes[i]);
                state->phantom_planes[i] = NULL;
        }
+       state->phantom_plane_count = 0;
+
        state->stream_mask = 0;
        memset(&state->res_ctx, 0, sizeof(state->res_ctx));
        memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
index 54670e0b15189552dd8ee85a5c1a6f905820be37..51a970fcb5d05d32484726a3993f4886592191a3 100644 (file)
@@ -423,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc,
                return false;
        }
 
+       dc_exit_ips_for_hw_access(dc);
+
        wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
 
        dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
@@ -493,6 +495,8 @@ bool dc_stream_fc_disable_writeback(struct dc *dc,
                return false;
        }
 
+       dc_exit_ips_for_hw_access(dc);
+
        if (dwb->funcs->set_fc_enable)
                dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
 
@@ -542,6 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
                return false;
        }
 
+       dc_exit_ips_for_hw_access(dc);
+
        /* disable writeback */
        if (dc->hwss.disable_writeback) {
                struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
@@ -557,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,
                int num_dwb,
                struct dc_writeback_info *wb_info)
 {
+       dc_exit_ips_for_hw_access(dc);
+
        if (dc->hwss.mmhubbub_warmup)
                return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
        else
@@ -569,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
        struct resource_context *res_ctx =
                &dc->current_state->res_ctx;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
 
@@ -597,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
        dc = stream->ctx->dc;
        res_ctx = &dc->current_state->res_ctx;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 
@@ -628,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
        struct resource_context *res_ctx =
                &dc->current_state->res_ctx;
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
 
@@ -664,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
        if (i == MAX_PIPES)
                return true;
 
+       dc_exit_ips_for_hw_access(dc);
+
        return dc->hwss.dmdata_status_done(pipe);
 }
 
@@ -698,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
 
        pipe_ctx->stream->dmdata_address = attr->address;
 
+       dc_exit_ips_for_hw_access(dc);
+
        dc->hwss.program_dmdata_engine(pipe_ctx);
 
        if (hubp->funcs->dmdata_set_attributes != NULL &&
index 19a2c7140ae8437c8f01828837a0fc37dd559ecf..19140fb65787c8eccd58b639bc9f1a1c7db1a5cc 100644 (file)
@@ -161,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status(
                break;
        }
 
+       dc_exit_ips_for_hw_access(dc);
+
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx =
                                &dc->current_state->res_ctx.pipe_ctx[i];
index c9317ea0258ea1cb2f686830fddc7469158966cc..c789cc2e216de7e74b99d08e37417ba347051bd0 100644 (file)
@@ -51,7 +51,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.266"
+#define DC_VER "3.2.271"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -429,12 +429,12 @@ struct dc_config {
        bool force_bios_enable_lttpr;
        uint8_t force_bios_fixed_vs;
        int sdpif_request_limit_words_per_umc;
-       bool use_old_fixed_vs_sequence;
        bool dc_mode_clk_limit_support;
        bool EnableMinDispClkODM;
        bool enable_auto_dpm_test_logs;
        unsigned int disable_ips;
        unsigned int disable_ips_in_vpb;
+       bool usb4_bw_alloc_support;
 };
 
 enum visual_confirm {
@@ -987,9 +987,11 @@ struct dc_debug_options {
        bool psp_disabled_wa;
        unsigned int ips2_eval_delay_us;
        unsigned int ips2_entry_delay_us;
+       bool disable_dmub_reallow_idle;
        bool disable_timeout;
        bool disable_extblankadj;
        unsigned int static_screen_wait_frames;
+       bool force_chroma_subsampling_1tap;
 };
 
 struct gpu_info_soc_bounding_box_v1_0;
@@ -1068,6 +1070,7 @@ struct dc {
        } scratch;
 
        struct dml2_configuration_options dml2_options;
+       enum dc_acpi_cm_power_state power_state;
 };
 
 enum frame_buffer_mode {
@@ -2219,11 +2222,9 @@ struct dc_sink_dsc_caps {
        // 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),
        // 'false' if they are sink's DSC caps
        bool is_virtual_dpcd_dsc;
-#if defined(CONFIG_DRM_AMD_DC_FP)
        // 'true' if MST topology supports DSC passthrough for sink
        // 'false' if MST topology does not support DSC passthrough
        bool is_dsc_passthrough_supported;
-#endif
        struct dsc_dec_dpcd_caps dsc_dec_caps;
 };
 
@@ -2325,6 +2326,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
                                struct dc_cursor_attributes *cursor_attr);
 
 void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_exit_ips_for_hw_access(struct dc *dc);
 bool dc_dmub_is_ips_idle_state(struct dc *dc);
 
 /* set min and max memory clock to lowest and highest DPM level, respectively */
index 2b79a0e5638e1b757ea3d3527add517db139552e..a1477906fe4f8aa6a6624ed6795027e916f0c7a2 100644 (file)
@@ -74,7 +74,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
        struct dc_context *dc_ctx = dc_dmub_srv->ctx;
        enum dmub_status status;
 
-       status = dmub_srv_wait_for_idle(dmub, 100000);
+       do {
+               status = dmub_srv_wait_for_idle(dmub, 100000);
+       } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
+
        if (status != DMUB_STATUS_OK) {
                DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
                dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
@@ -145,7 +148,9 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
                        if (status == DMUB_STATUS_POWER_STATE_D3)
                                return false;
 
-                       dmub_srv_wait_for_idle(dmub, 100000);
+                       do {
+                               status = dmub_srv_wait_for_idle(dmub, 100000);
+                       } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
 
                        /* Requeue the command. */
                        status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
@@ -186,7 +191,9 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
 
        // Wait for DMUB to process command
        if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
-               status = dmub_srv_wait_for_idle(dmub, 100000);
+               do {
+                       status = dmub_srv_wait_for_idle(dmub, 100000);
+               } while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);
 
                if (status != DMUB_STATUS_OK) {
                        DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
@@ -780,21 +787,22 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
        } else if (subvp_pipe->next_odm_pipe) {
                pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
        } else {
-               pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0;
+               pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
        }
 
        // Find phantom pipe index based on phantom stream
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
 
-               if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
+               if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
+                               phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
                        pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
                        if (phantom_pipe->bottom_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
                        } else if (phantom_pipe->next_odm_pipe) {
                                pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
                        } else {
-                               pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0;
+                               pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
                        }
                        break;
                }
@@ -1195,6 +1203,9 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
        if (dc->debug.dmcub_emulation)
                return;
 
+       if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
+               return;
+
        memset(&cmd, 0, sizeof(cmd));
        cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
        cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
@@ -1205,13 +1216,15 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
        cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
 
        if (allow_idle) {
+               dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
                if (dc->hwss.set_idle_state)
                        dc->hwss.set_idle_state(dc, true);
        }
 
        /* NOTE: This does not use the "wake" interface since this is part of the wake path. */
        /* We also do not perform a wait since DMCUB could enter idle after the notification. */
-       dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+       dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
 }
 
 static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
@@ -1361,7 +1374,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
        else
                result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
 
-       if (result && reallow_idle)
+       if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
                dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
 
        return result;
@@ -1410,7 +1423,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
 
        result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
 
-       if (result && reallow_idle)
+       if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
                dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
 
        return result;
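
The same retry-until-idle loop now appears in three places in this file; it could be factored into a helper along these lines (hypothetical name, not part of this patch):

        static enum dmub_status dmub_wait_for_idle_retry(struct dc_dmub_srv *dc_dmub_srv)
        {
                enum dmub_status status;

                /* With debug.disable_timeout set, keep waiting instead of
                 * giving up after the 100000 us timeout expires. */
                do {
                        status = dmub_srv_wait_for_idle(dc_dmub_srv->dmub, 100000);
                } while (dc_dmub_srv->ctx->dc->debug.disable_timeout &&
                         status != DMUB_STATUS_OK);

                return status;
        }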
index 811474f4419bd264a7774676542e09a4f2e1f328..aae2f3a2660d950230f9e714fb98dc1714380d2d 100644 (file)
@@ -827,9 +827,7 @@ struct dc_dsc_config {
        uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */
        bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */
        int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */
-#if defined(CONFIG_DRM_AMD_DC_FP)
        bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */
-#endif
        bool is_dp; /* indicate if DSC is applied based on DP's capability */
        uint32_t mst_pbn; /* pbn of display on dsc mst hub */
        const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
@@ -942,6 +940,7 @@ struct dc_crtc_timing {
        uint32_t hdmi_vic;
        uint32_t rid;
        uint32_t fr_index;
+       uint32_t frl_uncompressed_video_bandwidth_in_kbps;
        enum dc_timing_3d_format timing_3d_format;
        enum dc_color_depth display_color_depth;
        enum dc_pixel_encoding pixel_encoding;
index f0458b8f00af842b87ab91feadd71eef4c680e27..12f3c35b3a34cde701346eee73ae87f51076ff81 100644 (file)
@@ -239,27 +239,294 @@ static void check_audio_bandwidth_hdmi(
                }
        }
 }
+
+static struct fixed31_32 get_link_symbol_clk_freq_mhz(enum dc_link_rate link_rate)
+{
+       switch (link_rate) {
+       case LINK_RATE_LOW:
+               return dc_fixpt_from_int(162); /* 162 MHz */
+       case LINK_RATE_HIGH:
+               return dc_fixpt_from_int(270); /* 270 MHz */
+       case LINK_RATE_HIGH2:
+               return dc_fixpt_from_int(540); /* 540 MHz */
+       case LINK_RATE_HIGH3:
+               return dc_fixpt_from_int(810); /* 810 MHz */
+       case LINK_RATE_UHBR10:
+               return dc_fixpt_from_fraction(3125, 10); /* 312.5 MHz */
+       case LINK_RATE_UHBR13_5:
+               return dc_fixpt_from_fraction(421875, 1000); /* 421.875 MHz */
+       case LINK_RATE_UHBR20:
+               return dc_fixpt_from_int(625); /* 625 MHz */
+       default:
+               /* Unexpected case, this requires debug if encountered. */
+               ASSERT(0);
+               return dc_fixpt_from_int(0);
+       }
+}
+
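
These constants follow from the DP link symbol clock definition: symbol clock = link bit rate / 10 for 8b/10b channel coding, and link bit rate / 32 for 128b/132b. For example, HBR2: 5400 MHz / 10 = 540 MHz; UHBR10: 10000 MHz / 32 = 312.5 MHz; UHBR13.5: 13500 MHz / 32 = 421.875 MHz.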
+struct dp_audio_layout_config {
+       uint8_t layouts_per_sample_denom;
+       uint8_t symbols_per_layout;
+       uint8_t max_layouts_per_audio_sdp;
+};
+
+static void get_audio_layout_config(
+       uint32_t channel_count,
+       enum dp_link_encoding encoding,
+       struct dp_audio_layout_config *output)
+{
+       /* Assuming L-PCM audio. Current implementation uses max 1 layout per SDP,
+        * with each layout being the same size (8ch layout).
+        */
+       if (encoding == DP_8b_10b_ENCODING) {
+               if (channel_count == 2) {
+                       output->layouts_per_sample_denom = 4;
+                       output->symbols_per_layout = 40;
+                       output->max_layouts_per_audio_sdp = 1;
+               } else if (channel_count == 8 || channel_count == 6) {
+                       output->layouts_per_sample_denom = 1;
+                       output->symbols_per_layout = 40;
+                       output->max_layouts_per_audio_sdp = 1;
+               }
+       } else if (encoding == DP_128b_132b_ENCODING) {
+               if (channel_count == 2) {
+                       output->layouts_per_sample_denom = 4;
+                       output->symbols_per_layout = 10;
+                       output->max_layouts_per_audio_sdp = 1;
+               } else if (channel_count == 8 || channel_count == 6) {
+                       output->layouts_per_sample_denom = 1;
+                       output->symbols_per_layout = 10;
+                       output->max_layouts_per_audio_sdp = 1;
+               }
+       }
+}
 
-/*For DP SST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpsst(
+static uint32_t get_av_stream_map_lane_count(
+       enum dp_link_encoding encoding,
+       enum dc_lane_count lane_count,
+       bool is_mst)
+{
+       uint32_t av_stream_map_lane_count = 0;
+
+       if (encoding == DP_8b_10b_ENCODING) {
+               if (!is_mst)
+                       av_stream_map_lane_count = lane_count;
+               else
+                       av_stream_map_lane_count = 4;
+       } else if (encoding == DP_128b_132b_ENCODING) {
+               av_stream_map_lane_count = 4;
+       }
+
+       ASSERT(av_stream_map_lane_count != 0);
+
+       return av_stream_map_lane_count;
+}
+
+static uint32_t get_audio_sdp_overhead(
+       enum dp_link_encoding encoding,
+       enum dc_lane_count lane_count,
+       bool is_mst)
+{
+       uint32_t audio_sdp_overhead = 0;
+
+       if (encoding == DP_8b_10b_ENCODING) {
+               if (is_mst)
+                       audio_sdp_overhead = 16; /* 4 * 2 + 8 */
+               else
+                       audio_sdp_overhead = lane_count * 2 + 8;
+       } else if (encoding == DP_128b_132b_ENCODING) {
+               audio_sdp_overhead = 10; /* 4 x 2.5 */
+       }
+
+       ASSERT(audio_sdp_overhead != 0);
+
+       return audio_sdp_overhead;
+}
+
+static uint32_t calculate_required_audio_bw_in_symbols(
        const struct audio_crtc_info *crtc_info,
+       const struct dp_audio_layout_config *layout_config,
        uint32_t channel_count,
-       union audio_sample_rates *sample_rates)
+       uint32_t sample_rate_hz,
+       uint32_t av_stream_map_lane_count,
+       uint32_t audio_sdp_overhead)
+{
+       /* DP spec recommends a safety margin between 1.05 and 1.1 to prevent sample under-run */
+       struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100);
+       struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction(
+                       crtc_info->requested_pixel_clock_100Hz, crtc_info->h_total * 10);
+       struct fixed31_32 samples_per_line;
+       struct fixed31_32 layouts_per_line;
+       struct fixed31_32 symbols_per_sdp_max_layout;
+       struct fixed31_32 remainder;
+       uint32_t num_sdp_with_max_layouts;
+       uint32_t required_symbols_per_hblank;
+
+       samples_per_line = dc_fixpt_from_fraction(sample_rate_hz, 1000);
+       samples_per_line = dc_fixpt_div(samples_per_line, horizontal_line_freq_khz);
+       layouts_per_line = dc_fixpt_div_int(samples_per_line, layout_config->layouts_per_sample_denom);
+
+       num_sdp_with_max_layouts = dc_fixpt_floor(
+                       dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp));
+       symbols_per_sdp_max_layout = dc_fixpt_from_int(
+                       layout_config->max_layouts_per_audio_sdp * layout_config->symbols_per_layout);
+       symbols_per_sdp_max_layout = dc_fixpt_add_int(symbols_per_sdp_max_layout, audio_sdp_overhead);
+       symbols_per_sdp_max_layout = dc_fixpt_mul(symbols_per_sdp_max_layout, audio_sdp_margin);
+       required_symbols_per_hblank = num_sdp_with_max_layouts;
+       required_symbols_per_hblank *= ((dc_fixpt_ceil(symbols_per_sdp_max_layout) + av_stream_map_lane_count) /
+                       av_stream_map_lane_count) * av_stream_map_lane_count;
+
+       if (num_sdp_with_max_layouts != dc_fixpt_ceil(
+                       dc_fixpt_div_int(layouts_per_line, layout_config->max_layouts_per_audio_sdp))) {
+               remainder = dc_fixpt_sub_int(layouts_per_line,
+                               num_sdp_with_max_layouts * layout_config->max_layouts_per_audio_sdp);
+               remainder = dc_fixpt_mul_int(remainder, layout_config->symbols_per_layout);
+               remainder = dc_fixpt_add_int(remainder, audio_sdp_overhead);
+               remainder = dc_fixpt_mul(remainder, audio_sdp_margin);
+               required_symbols_per_hblank += ((dc_fixpt_ceil(remainder) + av_stream_map_lane_count) /
+                               av_stream_map_lane_count) * av_stream_map_lane_count;
+       }
+
+       return required_symbols_per_hblank;
+}
+
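
A worked pass through the function above, with assumed numbers: 148.5 MHz pixel clock (1485000 in 100 Hz units), h_total = 2200, 2-channel 48 kHz audio over 8b/10b MST on four lanes (layouts_per_sample_denom = 4, symbols_per_layout = 40, audio_sdp_overhead = 16):

        /* line freq   = 1485000 / (2200 * 10)    = 67.5 kHz
         * samples     = (48000 / 1000) / 67.5    = 0.711 per line
         * layouts     = 0.711 / 4                = 0.178 per line
         * full SDPs   = floor(0.178 / 1)         = 0
         * remainder   = (0.178 * 40 + 16) * 1.1  = 25.4  -> ceil = 26
         * lane-padded = ((26 + 4) / 4) * 4       = 28 symbols per hblank
         */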
+/* The current calculation is only applicable to 8b/10b MST and 128b/132b SST/MST.
+ */
+static uint32_t calculate_available_hblank_bw_in_symbols(
+       const struct audio_crtc_info *crtc_info,
+       const struct audio_dp_link_info *dp_link_info)
 {
-       /* do nothing */
+       uint64_t hblank = crtc_info->h_total - crtc_info->h_active;
+       struct fixed31_32 hblank_time_msec =
+                       dc_fixpt_from_fraction(hblank * 10, crtc_info->requested_pixel_clock_100Hz);
+       struct fixed31_32 lsclkfreq_mhz =
+                       get_link_symbol_clk_freq_mhz(dp_link_info->link_rate);
+       struct fixed31_32 average_stream_sym_bw_frac;
+       struct fixed31_32 peak_stream_bw_kbps;
+       struct fixed31_32 bits_per_pixel;
+       struct fixed31_32 link_bw_kbps;
+       struct fixed31_32 available_stream_sym_count;
+       uint32_t available_hblank_bw = 0; /* in stream symbols */
+
+       if (crtc_info->dsc_bits_per_pixel) {
+               bits_per_pixel = dc_fixpt_from_fraction(crtc_info->dsc_bits_per_pixel, 16);
+       } else {
+               switch (crtc_info->color_depth) {
+               case COLOR_DEPTH_666:
+                       bits_per_pixel = dc_fixpt_from_int(6);
+                       break;
+               case COLOR_DEPTH_888:
+                       bits_per_pixel = dc_fixpt_from_int(8);
+                       break;
+               case COLOR_DEPTH_101010:
+                       bits_per_pixel = dc_fixpt_from_int(10);
+                       break;
+               case COLOR_DEPTH_121212:
+                       bits_per_pixel = dc_fixpt_from_int(12);
+                       break;
+               default:
+                       /* Default to commonly supported color depth. */
+                       bits_per_pixel = dc_fixpt_from_int(8);
+                       break;
+               }
+
+               bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 3);
+
+               if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+                       bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 3);
+                       bits_per_pixel = dc_fixpt_mul_int(bits_per_pixel, 2);
+               } else if (crtc_info->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+                       bits_per_pixel = dc_fixpt_div_int(bits_per_pixel, 2);
+               }
+       }
+
+       /* Use simple stream BW calculation because mainlink overhead is
+        * accounted for separately in the audio BW calculations.
+        */
+       peak_stream_bw_kbps = dc_fixpt_from_fraction(crtc_info->requested_pixel_clock_100Hz, 10);
+       peak_stream_bw_kbps = dc_fixpt_mul(peak_stream_bw_kbps, bits_per_pixel);
+       link_bw_kbps = dc_fixpt_from_int(dp_link_info->link_bandwidth_kbps);
+       average_stream_sym_bw_frac = dc_fixpt_div(peak_stream_bw_kbps, link_bw_kbps);
+
+       available_stream_sym_count = dc_fixpt_mul_int(hblank_time_msec, 1000);
+       available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, lsclkfreq_mhz);
+       available_stream_sym_count = dc_fixpt_mul(available_stream_sym_count, average_stream_sym_bw_frac);
+       available_hblank_bw = dc_fixpt_floor(available_stream_sym_count);
+       available_hblank_bw *= dp_link_info->lane_count;
+       available_hblank_bw -= crtc_info->dsc_num_slices * 4; /* EOC overhead */
+
+       if (available_hblank_bw < dp_link_info->hblank_min_symbol_width)
+               available_hblank_bw = dp_link_info->hblank_min_symbol_width;
+
+       if (available_hblank_bw < 12)
+               available_hblank_bw = 0;
+       else
+               available_hblank_bw -= 12; /* Main link overhead */
+
+       return available_hblank_bw;
 }
 
-/*For DP MST, calculate if specified sample rates can fit into a given timing */
-static void check_audio_bandwidth_dpmst(
+static void check_audio_bandwidth_dp(
        const struct audio_crtc_info *crtc_info,
+       const struct audio_dp_link_info *dp_link_info,
        uint32_t channel_count,
        union audio_sample_rates *sample_rates)
 {
-       /* do nothing  */
+       struct dp_audio_layout_config layout_config = {0};
+       uint32_t available_hblank_bw;
+       uint32_t av_stream_map_lane_count;
+       uint32_t audio_sdp_overhead;
+
+       /* TODO: Add validation for the SST 8b/10b case */
+       if (!dp_link_info->is_mst && dp_link_info->encoding == DP_8b_10b_ENCODING)
+               return;
+
+       available_hblank_bw = calculate_available_hblank_bw_in_symbols(
+                       crtc_info, dp_link_info);
+       av_stream_map_lane_count = get_av_stream_map_lane_count(
+                       dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+       audio_sdp_overhead = get_audio_sdp_overhead(
+                       dp_link_info->encoding, dp_link_info->lane_count, dp_link_info->is_mst);
+       get_audio_layout_config(
+                       channel_count, dp_link_info->encoding, &layout_config);
+
+       if (layout_config.max_layouts_per_audio_sdp == 0 ||
+               layout_config.symbols_per_layout == 0 ||
+               layout_config.layouts_per_sample_denom == 0) {
+               return;
+       }
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 192000,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_192 = 0;
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 176400,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_176_4 = 0;
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 96000,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_96 = 0;
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 88200,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_88_2 = 0;
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 48000,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_48 = 0;
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 44100,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_44_1 = 0;
+       if (available_hblank_bw < calculate_required_audio_bw_in_symbols(
+                       crtc_info, &layout_config, channel_count, 32000,
+                       av_stream_map_lane_count, audio_sdp_overhead))
+               sample_rates->rate.RATE_32 = 0;
 }
 
 static void check_audio_bandwidth(
        const struct audio_crtc_info *crtc_info,
+       const struct audio_dp_link_info *dp_link_info,
        uint32_t channel_count,
        enum signal_type signal,
        union audio_sample_rates *sample_rates)
@@ -271,12 +538,9 @@ static void check_audio_bandwidth(
                break;
        case SIGNAL_TYPE_EDP:
        case SIGNAL_TYPE_DISPLAY_PORT:
-               check_audio_bandwidth_dpsst(
-                       crtc_info, channel_count, sample_rates);
-               break;
        case SIGNAL_TYPE_DISPLAY_PORT_MST:
-               check_audio_bandwidth_dpmst(
-                       crtc_info, channel_count, sample_rates);
+               check_audio_bandwidth_dp(
+                       crtc_info, dp_link_info, channel_count, sample_rates);
                break;
        default:
                break;
@@ -394,7 +658,8 @@ void dce_aud_az_configure(
        struct audio *audio,
        enum signal_type signal,
        const struct audio_crtc_info *crtc_info,
-       const struct audio_info *audio_info)
+       const struct audio_info *audio_info,
+       const struct audio_dp_link_info *dp_link_info)
 {
        struct dce_audio *aud = DCE_AUD(audio);
 
@@ -529,6 +794,7 @@ void dce_aud_az_configure(
 
                                check_audio_bandwidth(
                                        crtc_info,
+                                       dp_link_info,
                                        channel_count,
                                        signal,
                                        &sample_rates);
@@ -588,6 +854,7 @@ void dce_aud_az_configure(
 
        check_audio_bandwidth(
                crtc_info,
+               dp_link_info,
                8,
                signal,
                &sample_rate);
index dbd2cfed0603085f25a67ca3700588716afb6cc0..539f881928d1010e2aea7e3ca6383d3263855ad4 100644 (file)
@@ -170,7 +170,8 @@ void dce_aud_az_disable(struct audio *audio);
 void dce_aud_az_configure(struct audio *audio,
        enum signal_type signal,
        const struct audio_crtc_info *crtc_info,
-       const struct audio_info *audio_info);
+       const struct audio_info *audio_info,
+       const struct audio_dp_link_info *dp_link_info);
 
 void dce_aud_wall_dto_setup(struct audio *audio,
        enum signal_type signal,
index 38e4797e9476ca8de7ba6ad92c6906db5f9823c1..b010814706fec49634b4e76aee6c7c0cfaed4efc 100644 (file)
@@ -258,7 +258,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
                *residency = 0;
 }
 
-/**
+/*
  * Set REPLAY power optimization flags and coasting vtotal.
  */
 static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
@@ -280,7 +280,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
        dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 }
 
-/**
+/*
  * send Replay general cmd to DMUB.
  */
 static void dmub_replay_send_cmd(struct dmub_replay *dmub,
index 3538973bd0c6cb7a23a7ffdd2f70883e354ed34f..b7e57aa27361952ddf17985e0e8adfce2bdc4a56 100644 (file)
@@ -62,6 +62,26 @@ void cm_helper_program_color_matrices(
 
 }
 
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+                                  uint16_t *regval,
+                                  const struct color_matrices_reg *reg)
+{
+       uint32_t cur_csc_reg, regval0, regval1;
+       unsigned int i = 0;
+
+       for (cur_csc_reg = reg->csc_c11_c12;
+            cur_csc_reg <= reg->csc_c33_c34; cur_csc_reg++) {
+               REG_GET_2(cur_csc_reg,
+                               csc_c11, &regval0,
+                               csc_c12, &regval1);
+
+               regval[2 * i] = regval0;
+               regval[(2 * i) + 1] = regval1;
+
+               i++;
+       }
+}
+
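
The read helper mirrors cm_helper_program_color_matrices(): it walks the six packed coefficient registers from csc_c11_c12 through csc_c33_c34, unpacking two 16-bit coefficients from each. A usage sketch, assuming gam_regs has been populated as in the dpp callers later in this merge:

        uint16_t regval[12];    /* 3x4 matrix: C11, C12, ..., C33, C34 */

        cm_helper_read_color_matrices(ctx, regval, &gam_regs);
        /* regval[0] = C11, regval[1] = C12, ..., regval[11] = C34 */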
 void cm_helper_program_xfer_func(
                struct dc_context *ctx,
                const struct pwl_params *params,
index 0a68b63d61260b4118ffe11ff48980ef80e5189d..decc50b1ac53c422547ff821b0c3bbe72418154d 100644 (file)
@@ -114,5 +114,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
                                const struct dc_transfer_func *output_tf,
                                struct pwl_params *lut_params);
 
-
+void cm_helper_read_color_matrices(struct dc_context *ctx,
+                                  uint16_t *regval,
+                                  const struct color_matrices_reg *reg);
 #endif
index ef52e6b6eccfbd1ea1a85df0094c2e8363380576..4e391fd1d71caa4cbe6eab34544e5704bff038b7 100644 (file)
@@ -543,7 +543,8 @@ static const struct dpp_funcs dcn10_dpp_funcs = {
                .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier,
                .dpp_program_blnd_lut = NULL,
                .dpp_program_shaper_lut = NULL,
-               .dpp_program_3dlut = NULL
+               .dpp_program_3dlut = NULL,
+               .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap,
 };
 
 static struct dpp_caps dcn10_dpp_cap = {
index c9e045666dcc89854a17492c6df79fe1afc2524c..a039eedc7c24b7258d26cb9e7b2f7061833334a9 100644 (file)
@@ -1521,4 +1521,7 @@ void dpp1_construct(struct dcn10_dpp *dpp1,
        const struct dcn_dpp_registers *tf_regs,
        const struct dcn_dpp_shift *tf_shift,
        const struct dcn_dpp_mask *tf_mask);
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+                            struct dpp_grph_csc_adjustment *adjust);
 #endif
index 904c2d2789987bce097fa805182a49dc9675a71e..2f994a3a0b9cdb9c13590775abe8dd3a80cf161b 100644 (file)
@@ -98,7 +98,7 @@ static void program_gamut_remap(
 
        if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
                REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
-                               CM_GAMUT_REMAP_MODE, 0);
+                       CM_GAMUT_REMAP_MODE, 0);
                return;
        }
        switch (select) {
@@ -181,6 +181,74 @@ void dpp1_cm_set_gamut_remap(
        }
 }
 
+static void read_gamut_remap(struct dcn10_dpp *dpp,
+                            uint16_t *regval,
+                            enum gamut_remap_select *select)
+{
+       struct color_matrices_reg gam_regs;
+       uint32_t selection;
+
+       REG_GET(CM_GAMUT_REMAP_CONTROL,
+               CM_GAMUT_REMAP_MODE, &selection);
+
+       *select = selection;
+
+       gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+       gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+       gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+       gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+       if (*select == GAMUT_REMAP_COEFF) {
+
+               gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+               cm_helper_read_color_matrices(
+                               dpp->base.ctx,
+                               regval,
+                               &gam_regs);
+
+       } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+               gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+               cm_helper_read_color_matrices(
+                               dpp->base.ctx,
+                               regval,
+                               &gam_regs);
+
+       } else if (*select == GAMUT_REMAP_COMB_COEFF) {
+
+               gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+               cm_helper_read_color_matrices(
+                               dpp->base.ctx,
+                               regval,
+                               &gam_regs);
+       }
+}
+
+void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
+                            struct dpp_grph_csc_adjustment *adjust)
+{
+       struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+       uint16_t arr_reg_val[12];
+       enum gamut_remap_select select;
+
+       read_gamut_remap(dpp, arr_reg_val, &select);
+
+       if (select == GAMUT_REMAP_BYPASS) {
+               adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+               return;
+       }
+
+       adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+       convert_hw_matrix(adjust->temperature_matrix,
+                         arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
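Editor's note: the added read path is the inverse of the existing dpp1_cm_set_gamut_remap(). read_gamut_remap() decodes which coefficient bank is live, and dpp1_cm_get_gamut_remap() reports either bypass or the raw registers converted back through convert_hw_matrix(). A minimal standalone sketch of that bypass-vs-matrix decision, using hypothetical stand-in types rather than the driver's headers:

#include <stdint.h>
#include <stdio.h>

enum remap_select { REMAP_BYPASS, REMAP_COEFF_A, REMAP_COEFF_B, REMAP_COEFF_C };
enum adjust_type { ADJUST_BYPASS, ADJUST_HW, ADJUST_SW };

struct adjustment {
	enum adjust_type type;
	uint16_t matrix[12];            /* 3x4 fixed-point CSC coefficients */
};

/* Mirrors the shape of dpp1_cm_get_gamut_remap(): bypass short-circuits,
 * otherwise the raw register words are reported as a software adjustment. */
static void get_gamut_remap(enum remap_select sel, const uint16_t regval[12],
			    struct adjustment *adj)
{
	if (sel == REMAP_BYPASS) {
		adj->type = ADJUST_BYPASS;
		return;
	}
	adj->type = ADJUST_SW;
	for (int i = 0; i < 12; i++)
		adj->matrix[i] = regval[i];
}

int main(void)
{
	uint16_t regs[12] = { 0x2000 };  /* C11 = 1.0 in an assumed fixed point */
	struct adjustment adj;

	get_gamut_remap(REMAP_COEFF_A, regs, &adj);
	printf("type=%d C11=0x%04x\n", adj.type, adj.matrix[0]);
	return 0;
}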
 static void dpp1_cm_program_color_matrix(
                struct dcn10_dpp *dpp,
                const uint16_t *regval)
index 0dec57679269b0da486b00b328ad1377494ca1e5..48a40dcc7050bd597ed7e5b7d645b0764db2c572 100644 (file)
@@ -23,6 +23,7 @@
  *
  */
 
+#include "core_types.h"
 #include "dm_services.h"
 #include "dcn10_opp.h"
 #include "reg_helper.h"
@@ -160,6 +161,9 @@ static void opp1_set_pixel_encoding(
        struct dcn10_opp *oppn10,
        const struct clamping_and_pixel_encoding_params *params)
 {
+       bool force_chroma_subsampling_1tap =
+                       oppn10->base.ctx->dc->debug.force_chroma_subsampling_1tap;
+
        switch (params->pixel_encoding) {
 
        case PIXEL_ENCODING_RGB:
@@ -178,6 +182,9 @@ static void opp1_set_pixel_encoding(
        default:
                break;
        }
+
+       if (force_chroma_subsampling_1tap)
+               REG_UPDATE(FMT_CONTROL, FMT_SUBSAMPLING_MODE, 0);
 }
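Editor's note: the new force_chroma_subsampling_1tap debug flag is applied after the encoding-driven switch, so it overrides whatever FMT_SUBSAMPLING_MODE was just programmed. A standalone sketch of that ordering; the encoding-to-mode mapping below is illustrative, not taken from the register spec:

#include <stdio.h>

enum pixel_encoding { ENC_RGB, ENC_YCBCR422, ENC_YCBCR420 };

static int pick_subsampling_mode(enum pixel_encoding enc, int force_1tap)
{
	int mode;

	switch (enc) {
	case ENC_YCBCR422:
	case ENC_YCBCR420:
		mode = 2;	/* assumed multi-tap filter */
		break;
	default:
		mode = 0;
		break;
	}

	if (force_1tap)		/* debug knob wins over the computed mode */
		mode = 0;
	return mode;
}

int main(void)
{
	printf("%d %d\n",
	       pick_subsampling_mode(ENC_YCBCR420, 0),
	       pick_subsampling_mode(ENC_YCBCR420, 1));
	return 0;
}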
 
 /**
index eaa7032f0f1a3c11f71e99d5dfc1526f8861eb94..1516c0a4872663655e0ccd60aa543bc4bc97f78e 100644 (file)
@@ -55,21 +55,23 @@ void dpp20_read_state(struct dpp *dpp_base,
 
        REG_GET(DPP_CONTROL,
                        DPP_CLOCK_ENABLE, &s->is_enabled);
+
+       // Degamma LUT (RAM)
        REG_GET(CM_DGAM_CONTROL,
-                       CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
-       // BGAM has no ROM, and definition is different, can't reuse same dump
-       //REG_GET(CM_BLNDGAM_CONTROL,
-       //              CM_BLNDGAM_LUT_MODE, &s->rgam_lut_mode);
-       REG_GET(CM_GAMUT_REMAP_CONTROL,
-                       CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode);
-       if (s->gamut_remap_mode) {
-               s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12);
-               s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14);
-               s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22);
-               s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24);
-               s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32);
-               s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34);
-       }
+               CM_DGAM_LUT_MODE, &s->dgam_lut_mode);
+
+       // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+       REG_GET(CM_SHAPER_CONTROL,
+               CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+       REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL,
+                 CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode,
+                 CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+       REG_GET(CM_3DLUT_MODE,
+               CM_3DLUT_SIZE, &s->lut3d_size);
+
+       // Blend/Out Gamma (RAM)
+       REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK,
+               CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode);
 }
 
 void dpp2_power_on_obuf(
@@ -393,6 +395,7 @@ static struct dpp_funcs dcn20_dpp_funcs = {
        .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
        .dpp_dppclk_control = dpp1_dppclk_control,
        .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+       .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
 };
 
 static struct dpp_caps dcn20_dpp_cap = {
index e735363d0051bcfd412f870f806472970e002a0c..672cde46c4b9af6bd7df74b01a39287324bfb76c 100644 (file)
@@ -775,4 +775,7 @@ bool dpp2_construct(struct dcn20_dpp *dpp2,
 void dpp2_power_on_obuf(
                struct dpp *dpp_base,
        bool power_on);
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+                            struct dpp_grph_csc_adjustment *adjust);
 #endif /* __DC_HWSS_DCN20_H__ */
index 598caa508d431183b21e96b3ec87254e75b43568..58dc69926e8a81666300a6d51d47fcbf4d26e451 100644 (file)
@@ -234,6 +234,61 @@ void dpp2_cm_set_gamut_remap(
        }
 }
 
+static void read_gamut_remap(struct dcn20_dpp *dpp,
+                            uint16_t *regval,
+                            enum dcn20_gamut_remap_select *select)
+{
+       struct color_matrices_reg gam_regs;
+       uint32_t selection;
+
+       IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA,
+                  CM_TEST_DEBUG_DATA_STATUS_IDX,
+                  CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection);
+
+       *select = selection;
+
+       gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+       gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+       gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+       gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+       if (*select == DCN2_GAMUT_REMAP_COEF_A) {
+               gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+               cm_helper_read_color_matrices(dpp->base.ctx,
+                                             regval,
+                                             &gam_regs);
+
+       } else if (*select == DCN2_GAMUT_REMAP_COEF_B) {
+               gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+               cm_helper_read_color_matrices(dpp->base.ctx,
+                                             regval,
+                                             &gam_regs);
+       }
+}
+
+void dpp2_cm_get_gamut_remap(struct dpp *dpp_base,
+                            struct dpp_grph_csc_adjustment *adjust)
+{
+       struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base);
+       uint16_t arr_reg_val[12];
+       enum dcn20_gamut_remap_select select;
+
+       read_gamut_remap(dpp, arr_reg_val, &select);
+
+       if (select == DCN2_GAMUT_REMAP_BYPASS) {
+               adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+               return;
+       }
+
+       adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+       convert_hw_matrix(adjust->temperature_matrix,
+                         arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
 void dpp2_program_input_csc(
                struct dpp *dpp_base,
                enum dc_color_space color_space,
index 5da6e44f284a6892ba2a99ddc7f964b50c785b45..16b5ff208d14781617c45f9d4e4628b35e823203 100644 (file)
@@ -542,8 +542,30 @@ static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
        return NULL;
 }
 
+static void mpc2_read_mpcc_state(
+               struct mpc *mpc,
+               int mpcc_inst,
+               struct mpcc_state *s)
+{
+       struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
+
+       REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+       REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+       REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+       REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+                       MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+                       MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+                       MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+       REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+                       MPCC_BUSY, &s->busy);
+
+       /* Gamma block state */
+       REG_GET(MPCC_OGAM_LUT_RAM_CONTROL[mpcc_inst],
+               MPCC_OGAM_CONFIG_STATUS, &s->rgam_mode);
+}
+
 static const struct mpc_funcs dcn20_mpc_funcs = {
-       .read_mpcc_state = mpc1_read_mpcc_state,
+       .read_mpcc_state = mpc2_read_mpcc_state,
        .insert_plane = mpc1_insert_plane,
        .remove_mpcc = mpc1_remove_mpcc,
        .mpc_init = mpc1_mpc_init,
index a7268027a472af981da4109b58c09732d12bd3a7..f809a7d21033320e9b0f47200d5d158e51218b3d 100644 (file)
@@ -275,6 +275,7 @@ static struct dpp_funcs dcn201_dpp_funcs = {
        .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
        .dpp_dppclk_control = dpp1_dppclk_control,
        .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+       .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap,
 };
 
 static struct dpp_caps dcn201_dpp_cap = {
index 11f7746f3a656a2a9ad430cb96ff42534a70f90e..a3a769aad0420100ad0251c4f8f71e714e645b8b 100644 (file)
 void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
 {
        struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+       uint32_t gamcor_lut_mode, rgam_lut_mode;
 
        REG_GET(DPP_CONTROL,
-                       DPP_CLOCK_ENABLE, &s->is_enabled);
+               DPP_CLOCK_ENABLE, &s->is_enabled);
+
+       // Pre-degamma (ROM)
+       REG_GET_2(PRE_DEGAM,
+                 PRE_DEGAM_MODE, &s->pre_dgam_mode,
+                 PRE_DEGAM_SELECT, &s->pre_dgam_select);
+
+       // Gamma Correction (RAM)
+       REG_GET(CM_GAMCOR_CONTROL,
+               CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode);
+       if (s->gamcor_mode) {
+               REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode);
+               if (!gamcor_lut_mode)
+                       s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+       }
 
-       // TODO: Implement for DCN3
+       // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size)
+       REG_GET(CM_SHAPER_CONTROL,
+               CM_SHAPER_LUT_MODE, &s->shaper_lut_mode);
+       REG_GET(CM_3DLUT_MODE,
+               CM_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+       REG_GET(CM_3DLUT_READ_WRITE_CONTROL,
+               CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+       REG_GET(CM_3DLUT_MODE,
+               CM_3DLUT_SIZE, &s->lut3d_size);
+
+       // Blend/Out Gamma (RAM)
+       REG_GET(CM_BLNDGAM_CONTROL,
+               CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode);
+       if (s->rgam_lut_mode) {
+               REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode);
+               if (!rgam_lut_mode)
+                       s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B
+       }
 }
+
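Editor's note: the GAMCOR and BLNDGAM reads above need two fields each: a *_MODE_CURRENT field saying whether the block is active, and a *_SELECT_CURRENT field naming the LUT RAM bank in use. A sketch of that decode, with assumed enum values:

#include <stdio.h>

enum lut_mode { LUT_BYPASS = 0, LUT_RAM_A = 1, LUT_RAM_B = 2 };

static enum lut_mode decode_lut_mode(unsigned int mode_current,
				     unsigned int select_current)
{
	if (!mode_current)
		return LUT_BYPASS;
	/* select_current == 0 -> bank A, otherwise bank B */
	return select_current ? LUT_RAM_B : LUT_RAM_A;
}

int main(void)
{
	printf("%d %d %d\n",
	       decode_lut_mode(0, 0),   /* bypass */
	       decode_lut_mode(1, 0),   /* RAM A  */
	       decode_lut_mode(1, 1));  /* RAM B  */
	return 0;
}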
 /*program post scaler scs block in dpp CM*/
 void dpp3_program_post_csc(
                struct dpp *dpp_base,
@@ -1462,6 +1495,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
        .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
        .dpp_dppclk_control             = dpp1_dppclk_control,
        .dpp_set_hdr_multiplier         = dpp3_set_hdr_multiplier,
+       .dpp_get_gamut_remap            = dpp3_cm_get_gamut_remap,
 };
 
 
index cea3208e4ab1109719c144bd65df96ca6c81ad57..2ac8045a87a1365e1ebb3ff854c410abfa7da5ef 100644 (file)
@@ -637,4 +637,6 @@ void dpp3_program_cm_dealpha(
                struct dpp *dpp_base,
        uint32_t enable, uint32_t additive_blending);
 
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+                            struct dpp_grph_csc_adjustment *adjust);
 #endif /* __DC_HWSS_DCN30_H__ */
index e43f77c11c00825aad64ada6ddfb4b0bdce23aff..54ec144f7b812cc9f1aab1458f06405ee1d0a4d6 100644 (file)
@@ -408,3 +408,57 @@ void dpp3_cm_set_gamut_remap(
                program_gamut_remap(dpp, arr_reg_val, gamut_mode);
        }
 }
+
+static void read_gamut_remap(struct dcn3_dpp *dpp,
+                            uint16_t *regval,
+                            int *select)
+{
+       struct color_matrices_reg gam_regs;
+       uint32_t selection;
+
+       // current coefficient set in use
+       REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection);
+
+       *select = selection;
+
+       gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+       gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+       gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+       gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+       if (*select == GAMUT_REMAP_COEFF) {
+               gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+               cm_helper_read_color_matrices(dpp->base.ctx,
+                                             regval,
+                                             &gam_regs);
+
+       } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+               gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12);
+               gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34);
+
+               cm_helper_read_color_matrices(dpp->base.ctx,
+                                             regval,
+                                             &gam_regs);
+       }
+}
+
+void dpp3_cm_get_gamut_remap(struct dpp *dpp_base,
+                            struct dpp_grph_csc_adjustment *adjust)
+{
+       struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+       uint16_t arr_reg_val[12];
+       int select;
+
+       read_gamut_remap(dpp, arr_reg_val, &select);
+
+       if (select == GAMUT_REMAP_BYPASS) {
+               adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+               return;
+       }
+
+       adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+       convert_hw_matrix(adjust->temperature_matrix,
+                         arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
index d1500b2238580b526dfbea2b3ece636295f76c59..bf3386cd444d62c5232909f9b88061e6abf98887 100644 (file)
@@ -1129,6 +1129,64 @@ void mpc3_set_gamut_remap(
        }
 }
 
+static void read_gamut_remap(struct dcn30_mpc *mpc30,
+                            int mpcc_id,
+                            uint16_t *regval,
+                            uint32_t *select)
+{
+       struct color_matrices_reg gam_regs;
+
+       // current coefficient set in use
+       REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id], MPCC_GAMUT_REMAP_MODE_CURRENT, select);
+
+       gam_regs.shifts.csc_c11 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
+       gam_regs.masks.csc_c11  = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
+       gam_regs.shifts.csc_c12 = mpc30->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
+       gam_regs.masks.csc_c12 = mpc30->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
+
+       if (*select == GAMUT_REMAP_COEFF) {
+               gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_A[mpcc_id]);
+               gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_A[mpcc_id]);
+
+               cm_helper_read_color_matrices(
+                               mpc30->base.ctx,
+                               regval,
+                               &gam_regs);
+
+       } else if (*select == GAMUT_REMAP_COMA_COEFF) {
+
+               gam_regs.csc_c11_c12 = REG(MPC_GAMUT_REMAP_C11_C12_B[mpcc_id]);
+               gam_regs.csc_c33_c34 = REG(MPC_GAMUT_REMAP_C33_C34_B[mpcc_id]);
+
+               cm_helper_read_color_matrices(
+                               mpc30->base.ctx,
+                               regval,
+                               &gam_regs);
+
+       }
+}
+
+void mpc3_get_gamut_remap(struct mpc *mpc,
+                         int mpcc_id,
+                         struct mpc_grph_gamut_adjustment *adjust)
+{
+       struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+       uint16_t arr_reg_val[12];
+       int select;
+
+       read_gamut_remap(mpc30, mpcc_id, arr_reg_val, &select);
+
+       if (select == GAMUT_REMAP_BYPASS) {
+               adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+               return;
+       }
+
+       adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+       convert_hw_matrix(adjust->temperature_matrix,
+                         arr_reg_val, ARRAY_SIZE(arr_reg_val));
+}
+
 bool mpc3_program_3dlut(
                struct mpc *mpc,
                const struct tetrahedral_params *params,
@@ -1382,8 +1440,54 @@ static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc)
        }
 }
 
+static void mpc3_read_mpcc_state(
+               struct mpc *mpc,
+               int mpcc_inst,
+               struct mpcc_state *s)
+{
+       struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+       uint32_t rmu_status = 0xf;
+
+       REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id);
+       REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id);
+       REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id);
+       REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode,
+                       MPCC_ALPHA_BLND_MODE, &s->alpha_mode,
+                       MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha,
+                       MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only);
+       REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle,
+                       MPCC_BUSY, &s->busy);
+
+       /* Color blocks state */
+       REG_GET(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, &rmu_status);
+
+       if (rmu_status == mpcc_inst) {
+               REG_GET(SHAPER_CONTROL[0],
+                       MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+               REG_GET(RMU_3DLUT_MODE[0],
+                       MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+               REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[0],
+                       MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+               REG_GET(RMU_3DLUT_MODE[0],
+                       MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+       } else {
+               REG_GET(SHAPER_CONTROL[1],
+                       MPC_RMU_SHAPER_LUT_MODE_CURRENT, &s->shaper_lut_mode);
+               REG_GET(RMU_3DLUT_MODE[1],
+                       MPC_RMU_3DLUT_MODE_CURRENT, &s->lut3d_mode);
+               REG_GET(RMU_3DLUT_READ_WRITE_CONTROL[1],
+                       MPC_RMU_3DLUT_30BIT_EN, &s->lut3d_bit_depth);
+               REG_GET(RMU_3DLUT_MODE[1],
+                       MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
+       }
+
+       REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
+                 MPCC_OGAM_MODE_CURRENT, &s->rgam_mode,
+                 MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
+}
+
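Editor's note: the shaper and 3D LUT live in shared RMU blocks that are muxed onto MPCCs, so the read-back checks which MPCC RMU0 currently serves and otherwise reads RMU1's registers. A simplified model of that selection, with register access replaced by an array; the two-way fallback mirrors the code above and is not a general per-ASIC rule:

#include <stdio.h>

#define NUM_RMU 2

static int rmu_index_for_mpcc(const unsigned int mux_status[NUM_RMU],
			      unsigned int mpcc_inst)
{
	/* mirrors the driver's two-way choice: RMU0 if it matches,
	 * otherwise fall back to RMU1 */
	return (mux_status[0] == mpcc_inst) ? 0 : 1;
}

int main(void)
{
	unsigned int mux[NUM_RMU] = { 2, 5 };

	printf("mpcc2 -> RMU%d, mpcc5 -> RMU%d\n",
	       rmu_index_for_mpcc(mux, 2), rmu_index_for_mpcc(mux, 5));
	return 0;
}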
 static const struct mpc_funcs dcn30_mpc_funcs = {
-       .read_mpcc_state = mpc1_read_mpcc_state,
+       .read_mpcc_state = mpc3_read_mpcc_state,
        .insert_plane = mpc1_insert_plane,
        .remove_mpcc = mpc1_remove_mpcc,
        .mpc_init = mpc1_mpc_init,
index 5198f2167c7c8eb7b55d8aa7453010f7460cb726..9cb96ae95a2f753639e3149ee55757dae44bffd3 100644 (file)
@@ -1056,6 +1056,10 @@ void mpc3_set_gamut_remap(
        int mpcc_id,
        const struct mpc_grph_gamut_adjustment *adjust);
 
+void mpc3_get_gamut_remap(struct mpc *mpc,
+                         int mpcc_id,
+                         struct mpc_grph_gamut_adjustment *adjust);
+
 void mpc3_set_rmu_mux(
        struct mpc *mpc,
        int rmu_idx,
index dcf12a0b031c78fa658ee1261ae066d037e13636..681e75c6dbaf4389bdbaf4373c8e560bae86da9b 100644 (file)
@@ -133,6 +133,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
        .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
        .dpp_dppclk_control                     = dpp1_dppclk_control,
        .dpp_set_hdr_multiplier         = dpp3_set_hdr_multiplier,
+       .dpp_get_gamut_remap            = dpp3_cm_get_gamut_remap,
 };
 
 
index 4229369c57f4b4e8123697ac922ae76e8106ed33..f4d3f04ec85791fa394c50b3e4de167debc70815 100644 (file)
@@ -26,6 +26,9 @@
 #ifndef DM_CP_PSP_IF__H
 #define DM_CP_PSP_IF__H
 
+/*
+ * Interface to CPLIB/PSP to enable ASSR
+ */
 struct dc_link;
 
 struct cp_psp_stream_config {
index 6042a5a6a44f8c32187b2bea702892572f08ec57..59ade76ffb18d56f26a6b329b850462150214c04 100644 (file)
@@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
index 63c48c29ba4910beec4d9e6492b0894f9867d8bb..e7f4a2d491ccf42343afa24ba422e8cb84f57126 100644 (file)
@@ -4273,7 +4273,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
        //Calculate Swath, DET Configuration, DCFCLKDeepSleep
        //
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        for (k = 0; k < v->NumberOfActivePlanes; ++k) {
                                v->RequiredDPPCLKThisState[k] = v->RequiredDPPCLK[i][j][k];
@@ -4576,7 +4576,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
        //Calculate Return BW
 
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
                                if (v->BlendingAndTiming[k] == k) {
@@ -4635,7 +4635,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                        v->UrgentOutOfOrderReturnPerChannelVMDataOnly);
        v->FinalDRAMClockChangeLatency = (v->DRAMClockChangeLatencyOverride > 0 ? v->DRAMClockChangeLatencyOverride : v->DRAMClockChangeLatency);
 
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        v->DCFCLKState[i][j] = v->DCFCLKPerState[i];
                }
@@ -4646,7 +4646,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
                if (v->ClampMinDCFCLK) {
                        /* Clamp calculated values to actual minimum */
-                       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+                       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                                for (j = 0; j <= 1; ++j) {
                                        if (v->DCFCLKState[i][j] < mode_lib->soc.min_dcfclk) {
                                                v->DCFCLKState[i][j] = mode_lib->soc.min_dcfclk;
@@ -4656,7 +4656,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                }
        }
 
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        v->IdealSDPPortBandwidthPerState[i][j] = dml_min3(
                                        v->ReturnBusWidth * v->DCFCLKState[i][j],
@@ -4674,7 +4674,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
        //Re-ordering Buffer Support Check
 
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        if ((v->ROBBufferSizeInKByte - v->PixelChunkSizeInKByte) * 1024 / v->ReturnBWPerState[i][j]
                                        > (v->RoundTripPingLatencyCycles + 32) / v->DCFCLKState[i][j] + ReorderingBytes / v->ReturnBWPerState[i][j]) {
@@ -4692,7 +4692,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                MaxTotalVActiveRDBandwidth = MaxTotalVActiveRDBandwidth + v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k];
        }
 
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        v->MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min(
                                        v->IdealSDPPortBandwidthPerState[i][j] * v->MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation / 100,
@@ -4708,7 +4708,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 
        //Prefetch Check
 
-       for (i = 0; i < mode_lib->soc.num_states; ++i) {
+       for (i = start_state; i < mode_lib->soc.num_states; ++i) {
                for (j = 0; j <= 1; ++j) {
                        int NextPrefetchModeState = MinPrefetchMode;
 
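Editor's note: each of these loops now starts at start_state instead of 0, so power states below the usable minimum are skipped rather than evaluated and then discarded. An illustrative sketch of the idea; the real start_state derivation in DML is more involved:

#include <stdio.h>

int main(void)
{
	const int num_states = 4;
	const int dcfclk[] = { 300, 600, 900, 1200 };  /* MHz, made up */
	const int min_dcfclk = 500;
	int start_state = 0;

	while (start_state < num_states - 1 && dcfclk[start_state] < min_dcfclk)
		start_state++;                 /* first state worth checking */

	for (int i = start_state; i < num_states; i++)
		printf("evaluate state %d (%d MHz)\n", i, dcfclk[i]);
	return 0;
}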
index 3eb3a021ab7d72f672b0029d333e32f4f58af559..3f02bb806d421a224401aa2bde415e564f453e8b 100644 (file)
@@ -266,6 +266,17 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
                                        optimal_uclk_for_dcfclk_sta_targets[i] =
                                                        bw_params->clk_table.entries[j].memclk_mhz * 16;
                                        break;
+                               } else {
+                                       /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+                                        * On dcn303 the memory bandwidth is low enough that the optimal
+                                        * DCFCLK for every UCLK falls below the smallest DCFCLK STA target,
+                                        * so the match above never fires. In that case, populate the optimal
+                                        * UCLK for each DCFCLK STA target with the max UCLK.
+                                        */
+                                       if (j == num_uclk_states - 1) {
+                                               optimal_uclk_for_dcfclk_sta_targets[i] =
+                                                               bw_params->clk_table.entries[j].memclk_mhz * 16;
+                                       }
                                }
                        }
                }
index dd781a20692ee68847aadccea38fecc69a2e3683..ba76dd4a2ce29a68a75883b8e8538395195b4089 100644 (file)
@@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
        return updated;
 }
 
-static bool should_allow_odm_power_optimization(struct dc *dc,
+static bool should_apply_odm_power_optimization(struct dc *dc,
                struct dc_state *context, struct vba_vars_st *v, int *split,
                bool *merge)
 {
@@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate(
 {
        int i;
        unsigned int new_vlevel;
+       unsigned int cur_policy[MAX_PIPES];
 
-       for (i = 0; i < pipe_cnt; i++)
+       for (i = 0; i < pipe_cnt; i++) {
+               cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
                pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+       }
 
        new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 
@@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate(
                memset(merge, 0, MAX_PIPES * sizeof(bool));
                *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
                context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+       } else {
+               for (i = 0; i < pipe_cnt; i++)
+                       pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
        }
 }
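Editor's note: the fix is a save/try/restore: the per-pipe ODM combine policy is snapshotted before forcing the 2:1 experiment and rolled back when revalidation fails, so the experimental policy no longer leaks into later validation passes. A compact sketch of the pattern with stand-in types:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 6

static bool try_policy(int policy[], int pipe_cnt, int new_policy,
		       bool (*validate)(const int *, int))
{
	int saved[MAX_PIPES];

	for (int i = 0; i < pipe_cnt; i++) {
		saved[i] = policy[i];          /* snapshot current policy */
		policy[i] = new_policy;
	}
	if (validate(policy, pipe_cnt))
		return true;                   /* keep the new policy */
	for (int i = 0; i < pipe_cnt; i++)
		policy[i] = saved[i];          /* revert on failure */
	return false;
}

static bool always_fail(const int *p, int n) { (void)p; (void)n; return false; }

int main(void)
{
	int policy[MAX_PIPES] = { 1, 1, 1 };

	printf("applied=%d policy0=%d\n",
	       try_policy(policy, 3, 2, always_fail), policy[0]);
	return 0;
}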
 
@@ -1580,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
                }
        }
 
-       if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
+       if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
                try_odm_power_optimization_and_revalidate(
                                dc, context, pipes, split, merge, vlevel, *pipe_cnt);
 
@@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
                int i;
 
                pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
-               dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
+               if (!dc->config.enable_windowed_mpo_odm)
+                       dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
 
                /* repopulate_pipes = 1 means the pipes were either split or merged. In this case
                 * we have to re-calculate the DET allocation and run through DML once more to
index 7ea2bd5374d51b138d13179ab7444d0d8d2ef3a7..912256006d7567a712ca191346389cd9f10ac974 100644 (file)
@@ -583,9 +583,9 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
                        plane_count++;
        }
 
-       if (plane_count == 0) {
+       if (context->stream_count == 0 || plane_count == 0) {
                support = DCN_ZSTATE_SUPPORT_ALLOW;
-       } else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+       } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
                struct dc_link *link = context->streams[0]->sink->link;
                bool is_pwrseq0 = link && link->link_index == 0;
                bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
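Editor's note: a sketch of the reordered decision above: an empty configuration (no streams or no planes) now allows Z-state entry outright, and the single-eDP branch no longer insists on exactly one plane. The enum and the eDP condition below are stand-ins for the fuller PSR/Replay checks in the driver:

#include <stdbool.h>
#include <stdio.h>

enum zstate { ZSTATE_DISALLOW, ZSTATE_ALLOW, ZSTATE_ALLOW_Z8_ONLY };

static enum zstate decide(int stream_count, int plane_count, bool single_edp)
{
	if (stream_count == 0 || plane_count == 0)
		return ZSTATE_ALLOW;           /* nothing to display */
	if (stream_count == 1 && single_edp)
		return ZSTATE_ALLOW_Z8_ONLY;   /* stand-in for the PSR checks */
	return ZSTATE_DISALLOW;
}

int main(void)
{
	printf("%d %d\n", decide(0, 0, false), decide(1, 2, true));
	return 0;
}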
index 0baf39d64a2d4f33ab30f35e63502fd6ba8c569e..a52c594e1ba4be7551168a01d5f43a0186423e79 100644 (file)
@@ -141,14 +141,33 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
 {
        int i;
        unsigned int num_found = 0;
-       unsigned int plane_id_assigned_to_pipe;
+       unsigned int plane_id_assigned_to_pipe = -1;
 
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
-               if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state,
-                       state->res_ctx.pipe_ctx[i].stream->stream_id,
-                       ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) {
-                       if (plane_id_assigned_to_pipe == plane_id)
-                               pipes[num_found++] = i;
+               struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+               if (!pipe->plane_state || !pipe->stream)
+                       continue;
+
+               get_plane_id(ctx, state, pipe->plane_state, pipe->stream->stream_id,
+                                       ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[pipe->pipe_idx],
+                                       &plane_id_assigned_to_pipe);
+               if (plane_id_assigned_to_pipe == plane_id && !pipe->prev_odm_pipe
+                               && (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) {
+                       while (pipe) {
+                               struct pipe_ctx *mpc_pipe = pipe;
+
+                               while (mpc_pipe) {
+                                       pipes[num_found++] = mpc_pipe->pipe_idx;
+                                       mpc_pipe = mpc_pipe->bottom_pipe;
+                                       if (!mpc_pipe)
+                                               break;
+                                       if (mpc_pipe->plane_state != pipe->plane_state)
+                                               mpc_pipe = NULL;
+                               }
+                               pipe = pipe->next_odm_pipe;
+                       }
+                       break;
                }
        }
 
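Editor's note: find_pipes_assigned_to_plane() now collects every pipe backing the plane rather than only direct matches: it locates the plane's head pipe, then walks the MPC (bottom_pipe) chain inside each ODM (next_odm_pipe) segment. A simplified model of that traversal with a toy pipe struct:

#include <stdio.h>

struct pipe {
	int idx;
	const void *plane;
	struct pipe *bottom_pipe;    /* MPC chain */
	struct pipe *next_odm_pipe;  /* ODM chain */
};

static int collect_pipes(struct pipe *head, const void *plane,
			 int *out, int max)
{
	int n = 0;

	for (struct pipe *odm = head; odm; odm = odm->next_odm_pipe)
		for (struct pipe *mpc = odm;
		     mpc && mpc->plane == plane && n < max;
		     mpc = mpc->bottom_pipe)
			out[n++] = mpc->idx;
	return n;
}

int main(void)
{
	int plane;
	struct pipe p3 = { 3, &plane, 0, 0 };
	struct pipe p2 = { 2, &plane, 0, 0 };
	struct pipe p1 = { 1, &plane, &p3, 0 };   /* second ODM segment */
	struct pipe p0 = { 0, &plane, &p2, &p1 }; /* head pipe + MPC slice */
	int out[4], n = collect_pipes(&p0, &plane, out, 4);

	for (int i = 0; i < n; i++)
		printf("%d ", out[i]);
	printf("\n");  /* expected: 0 2 1 3 */
	return 0;
}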
@@ -566,8 +585,14 @@ static unsigned int find_pipes_assigned_to_stream(struct dml2_context *ctx, stru
        unsigned int num_found = 0;
 
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
-               if (state->res_ctx.pipe_ctx[i].stream && state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id) {
-                       pipes[num_found++] = i;
+               struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream && pipe->stream->stream_id == stream_id && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+                       while (pipe) {
+                               pipes[num_found++] = pipe->pipe_idx;
+                               pipe = pipe->next_odm_pipe;
+                       }
+                       break;
                }
        }
 
index 1068b962d1c12bf5145a7c64ae7ddfa991d0b198..f15d1dbad6a968204be0971776ad46650f2b7d0c 100644 (file)
@@ -234,7 +234,7 @@ static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state
                if (state->streams[i]->stream_id == stream_id) {
                        for (j = 0; j < state->stream_status[i].plane_count; j++) {
                                if (state->stream_status[i].plane_states[j] == plane &&
-                                       (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
+                                       (!is_plane_duplicate || (j == plane_index))) {
                                        *plane_id = (i << 16) | j;
                                        return true;
                                }
index 0df6c55eb32608e95b927a70a4b15ff3fec9e3f3..ac41f9c0a2834189ecfba441cdb463ce1d745bfc 100644 (file)
@@ -137,6 +137,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
        if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
                kbps = apply_128b_132b_stream_overhead(timing, kbps);
 
+       if (link_encoding == DC_LINK_ENCODING_HDMI_FRL &&
+                       timing->vic == 0 && timing->hdmi_vic == 0 &&
+                       timing->frl_uncompressed_video_bandwidth_in_kbps != 0)
+               kbps = timing->frl_uncompressed_video_bandwidth_in_kbps;
+
        return kbps;
 }
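Editor's note: the new branch covers HDMI FRL modes that carry no VIC: when the timing supplies its own uncompressed-bandwidth figure (presumably parsed from the display's capability data), that value replaces the one derived from the timing parameters. A sketch with illustrative numbers:

#include <stdint.h>
#include <stdio.h>

static uint32_t bandwidth_kbps(uint32_t derived_kbps, int is_frl,
			       int vic, int hdmi_vic, uint32_t frl_kbps)
{
	if (is_frl && vic == 0 && hdmi_vic == 0 && frl_kbps != 0)
		return frl_kbps;   /* trust the reported FRL bandwidth */
	return derived_kbps;
}

int main(void)
{
	printf("%u\n", (unsigned)bandwidth_kbps(17820000, 1, 0, 0, 18000000));
	return 0;
}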
 
index 2352428bcea3cab551aef88fe8bc781c443f6914..9d5df4c0da59796ebd8a16927cba72f17239c4fc 100644 (file)
@@ -1291,6 +1291,46 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
        }
 }
 
+static void populate_audio_dp_link_info(
+       const struct pipe_ctx *pipe_ctx,
+       struct audio_dp_link_info *dp_link_info)
+{
+       const struct dc_stream_state *stream = pipe_ctx->stream;
+       const struct dc_link *link = stream->link;
+       struct fixed31_32 link_bw_kbps;
+
+       dp_link_info->encoding = link->dc->link_srv->dp_get_encoding_format(
+                               &pipe_ctx->link_config.dp_link_settings);
+       dp_link_info->is_mst = (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+       dp_link_info->lane_count = pipe_ctx->link_config.dp_link_settings.lane_count;
+       dp_link_info->link_rate = pipe_ctx->link_config.dp_link_settings.link_rate;
+
+       link_bw_kbps = dc_fixpt_from_int(dc_link_bandwidth_kbps(link,
+                       &pipe_ctx->link_config.dp_link_settings));
+
+       /* For audio stream calculations, the video stream should not include FEC or SSC
+        * in order to get the most pessimistic values.
+        */
+       if (dp_link_info->encoding == DP_8b_10b_ENCODING &&
+                       link->dc->link_srv->dp_is_fec_supported(link)) {
+               link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+                               dc_fixpt_from_fraction(100, DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100));
+       } else if (dp_link_info->encoding == DP_128b_132b_ENCODING) {
+               link_bw_kbps = dc_fixpt_mul(link_bw_kbps,
+                               dc_fixpt_from_fraction(10000, 9975)); /* 0.25% SSC overhead (99.75% efficiency) */
+       }
+
+       dp_link_info->link_bandwidth_kbps = dc_fixpt_floor(link_bw_kbps);
+
+       /* HW minimum for 128b/132b HBlank is 4 frame symbols.
+        * TODO: Plumb the actual programmed HBlank min symbol width to here.
+        */
+       if (dp_link_info->encoding == DP_128b_132b_ENCODING)
+               dp_link_info->hblank_min_symbol_width = 4;
+       else
+               dp_link_info->hblank_min_symbol_width = 0;
+}
+
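Editor's note: populate_audio_dp_link_info() deliberately backs the coding overheads out of the link bandwidth so the audio calculation sees the most pessimistic picture, per the comment above. A worked sketch in plain integer math; the 97% FEC efficiency is an assumption inferred from the fraction's constant name, and the 0.25% SSC margin comes from the 10000/9975 factor:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t link_bw_kbps = 25920000;   /* e.g. HBR3 x4 payload rate */

	/* 8b/10b with FEC: scale by 100/97 to back out FEC efficiency */
	uint64_t no_fec = link_bw_kbps * 100 / 97;

	/* 128b/132b: scale by 10000/9975 to back out the 0.25% SSC margin */
	uint64_t no_ssc = link_bw_kbps * 10000 / 9975;

	printf("no FEC: %llu kbps, no SSC: %llu kbps\n",
	       (unsigned long long)no_fec, (unsigned long long)no_ssc);
	return 0;
}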
 static void build_audio_output(
        struct dc_state *state,
        const struct pipe_ctx *pipe_ctx,
@@ -1338,6 +1378,15 @@ static void build_audio_output(
        audio_output->crtc_info.calculated_pixel_clock_100Hz =
                        pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz;
 
+       audio_output->crtc_info.pixel_encoding =
+               stream->timing.pixel_encoding;
+
+       audio_output->crtc_info.dsc_bits_per_pixel =
+                       stream->timing.dsc_cfg.bits_per_pixel;
+
+       audio_output->crtc_info.dsc_num_slices =
+                       stream->timing.dsc_cfg.num_slices_h;
+
 /*for HDMI, audio ACR is with deep color ratio factor*/
        if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) &&
                audio_output->crtc_info.requested_pixel_clock_100Hz ==
@@ -1371,6 +1420,10 @@ static void build_audio_output(
 
        audio_output->pll_info.ss_percentage =
                        pipe_ctx->pll_settings.ss_percentage;
+
+       if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+               populate_audio_dp_link_info(pipe_ctx, &audio_output->dp_link_info);
+       }
 }
 
 static void program_scaler(const struct dc *dc,
@@ -1476,7 +1529,7 @@ static enum dc_status dce110_enable_stream_timing(
        return DC_OK;
 }
 
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
                struct pipe_ctx *pipe_ctx,
                struct dc_state *context,
                struct dc *dc)
@@ -1507,7 +1560,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
                                pipe_ctx->stream_res.audio,
                                pipe_ctx->stream->signal,
                                &audio_output.crtc_info,
-                               &pipe_ctx->stream->audio_info);
+                               &pipe_ctx->stream->audio_info,
+                               &audio_output.dp_link_info);
        }
 
        /* make sure no pipes syncd to the pipe being enabled */
@@ -2302,7 +2356,7 @@ enum dc_status dce110_apply_ctx_to_hw(
                if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
                        continue;
 
-               status = apply_single_controller_ctx_to_hw(
+               status = dce110_apply_single_controller_ctx_to_hw(
                                pipe_ctx,
                                context,
                                dc);
index 08028a1779ae819282ab2394de57c4b8f266a9f3..ed3cc3648e8e23f8d076b92e10a23791253f9662 100644 (file)
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
                struct dc *dc,
                struct dc_state *context);
 
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+               struct pipe_ctx *pipe_ctx,
+               struct dc_state *context,
+               struct dc *dc);
 
 void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
 
index 6dd479e8a348502c9b285a38f16650fb7cb4f95e..314798400b16e93f60769e0176bba1ab8fbf2b04 100644 (file)
@@ -283,33 +283,33 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
        DTN_INFO("\n");
 }
 
-void dcn10_log_hw_state(struct dc *dc,
-       struct dc_log_buffer_ctx *log_ctx)
+static void dcn10_log_color_state(struct dc *dc,
+                                 struct dc_log_buffer_ctx *log_ctx)
 {
        struct dc_context *dc_ctx = dc->ctx;
        struct resource_pool *pool = dc->res_pool;
        int i;
 
-       DTN_INFO_BEGIN();
-
-       dcn10_log_hubbub_state(dc, log_ctx);
-
-       dcn10_log_hubp_states(dc, log_ctx);
-
-       DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
-                       "  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
-                       "C31 C32   C33 C34\n");
+       DTN_INFO("DPP:    IGAM format    IGAM mode    DGAM mode    RGAM mode"
+                "  GAMUT adjust  "
+                "C11        C12        C13        C14        "
+                "C21        C22        C23        C24        "
+                "C31        C32        C33        C34        \n");
        for (i = 0; i < pool->pipe_count; i++) {
                struct dpp *dpp = pool->dpps[i];
                struct dcn_dpp_state s = {0};
 
                dpp->funcs->dpp_read_state(dpp, &s);
+               dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
 
                if (!s.is_enabled)
                        continue;
 
-               DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
-                               "%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
+               DTN_INFO("[%2d]:  %11xh  %11s    %9s    %9s"
+                        "  %12s  "
+                        "%010lld %010lld %010lld %010lld "
+                        "%010lld %010lld %010lld %010lld "
+                        "%010lld %010lld %010lld %010lld",
                                dpp->inst,
                                s.igam_input_format,
                                (s.igam_lut_mode == 0) ? "BypassFixed" :
@@ -329,16 +329,42 @@ void dcn10_log_hw_state(struct dc *dc,
                                        ((s.rgam_lut_mode == 3) ? "RAM" :
                                        ((s.rgam_lut_mode == 4) ? "RAM" :
                                                                 "Unknown")))),
-                               s.gamut_remap_mode,
-                               s.gamut_remap_c11_c12,
-                               s.gamut_remap_c13_c14,
-                               s.gamut_remap_c21_c22,
-                               s.gamut_remap_c23_c24,
-                               s.gamut_remap_c31_c32,
-                               s.gamut_remap_c33_c34);
+                               (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+                                       ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+                                                                                 "SW"),
+                               s.gamut_remap.temperature_matrix[0].value,
+                               s.gamut_remap.temperature_matrix[1].value,
+                               s.gamut_remap.temperature_matrix[2].value,
+                               s.gamut_remap.temperature_matrix[3].value,
+                               s.gamut_remap.temperature_matrix[4].value,
+                               s.gamut_remap.temperature_matrix[5].value,
+                               s.gamut_remap.temperature_matrix[6].value,
+                               s.gamut_remap.temperature_matrix[7].value,
+                               s.gamut_remap.temperature_matrix[8].value,
+                               s.gamut_remap.temperature_matrix[9].value,
+                               s.gamut_remap.temperature_matrix[10].value,
+                               s.gamut_remap.temperature_matrix[11].value);
                DTN_INFO("\n");
        }
        DTN_INFO("\n");
+       DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
+                "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+                "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
+                "  blnd_lut:%d  oscs:%d\n\n",
+                dc->caps.color.dpp.input_lut_shared,
+                dc->caps.color.dpp.icsc,
+                dc->caps.color.dpp.dgam_ram,
+                dc->caps.color.dpp.dgam_rom_caps.srgb,
+                dc->caps.color.dpp.dgam_rom_caps.bt2020,
+                dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+                dc->caps.color.dpp.dgam_rom_caps.pq,
+                dc->caps.color.dpp.dgam_rom_caps.hlg,
+                dc->caps.color.dpp.post_csc,
+                dc->caps.color.dpp.gamma_corr,
+                dc->caps.color.dpp.dgam_rom_for_yuv,
+                dc->caps.color.dpp.hw_3d_lut,
+                dc->caps.color.dpp.ogam_ram,
+                dc->caps.color.dpp.ocsc);
 
        DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
        for (i = 0; i < pool->pipe_count; i++) {
@@ -352,6 +378,30 @@ void dcn10_log_hw_state(struct dc *dc,
                                s.idle);
        }
        DTN_INFO("\n");
+       DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+                dc->caps.color.mpc.gamut_remap,
+                dc->caps.color.mpc.num_3dluts,
+                dc->caps.color.mpc.ogam_ram,
+                dc->caps.color.mpc.ocsc);
+}
+
+void dcn10_log_hw_state(struct dc *dc,
+                       struct dc_log_buffer_ctx *log_ctx)
+{
+       struct dc_context *dc_ctx = dc->ctx;
+       struct resource_pool *pool = dc->res_pool;
+       int i;
+
+       DTN_INFO_BEGIN();
+
+       dcn10_log_hubbub_state(dc, log_ctx);
+
+       dcn10_log_hubp_states(dc, log_ctx);
+
+       if (dc->hwss.log_color_state)
+               dc->hwss.log_color_state(dc, log_ctx);
+       else
+               dcn10_log_color_state(dc, log_ctx);
 
        DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
 
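Editor's note: dcn10_log_hw_state() now routes the color dump through an optional hwss log_color_state hook, so newer DCNs can install richer loggers (like the dcn20/dcn30 versions added elsewhere in this patch) while the DCN10 logger stays the fallback. A trimmed sketch of that dispatch with stand-in structures:

#include <stdio.h>

struct dc;        /* opaque here */
struct log_ctx;

struct hwseq_funcs {
	void (*log_color_state)(struct dc *dc, struct log_ctx *ctx);
};

static void dcn10_log_color_state(struct dc *dc, struct log_ctx *ctx)
{
	(void)dc; (void)ctx;
	printf("dcn10 color state\n");
}

static void log_hw_state(struct dc *dc, struct hwseq_funcs *hwss,
			 struct log_ctx *ctx)
{
	if (hwss->log_color_state)
		hwss->log_color_state(dc, ctx);   /* ASIC-specific logger */
	else
		dcn10_log_color_state(dc, ctx);   /* common fallback */
}

int main(void)
{
	struct hwseq_funcs hwss = { 0 };

	log_hw_state(0, &hwss, 0);
	return 0;
}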
@@ -1840,6 +1890,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
 {
        struct dpp *dpp = pipe_ctx->plane_res.dpp;
 
+       if (!stream)
+               return false;
+
        if (dpp == NULL)
                return false;
 
@@ -1862,8 +1915,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
        } else
                dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
 
-       if (stream != NULL && stream->ctx != NULL &&
-                       stream->out_transfer_func != NULL) {
+       if (stream->ctx &&
+           stream->out_transfer_func) {
                log_tf(stream->ctx,
                                stream->out_transfer_func,
                                dpp->regamma_params.hw_points_num);
index 4853ecac53f91f90a774192236e2d74ba6779c08..bc0a21957e33f97930df9b16582c8bb500026fde 100644 (file)
 #define FN(reg_name, field_name) \
        hws->shifts->field_name, hws->masks->field_name
 
+void dcn20_log_color_state(struct dc *dc,
+                          struct dc_log_buffer_ctx *log_ctx)
+{
+       struct dc_context *dc_ctx = dc->ctx;
+       struct resource_pool *pool = dc->res_pool;
+       int i;
+
+       DTN_INFO("DPP:  DGAM mode  SHAPER mode  3DLUT mode  3DLUT bit depth"
+                "  3DLUT size  RGAM mode  GAMUT adjust  "
+                "C11        C12        C13        C14        "
+                "C21        C22        C23        C24        "
+                "C31        C32        C33        C34        \n");
+
+       for (i = 0; i < pool->pipe_count; i++) {
+               struct dpp *dpp = pool->dpps[i];
+               struct dcn_dpp_state s = {0};
+
+               dpp->funcs->dpp_read_state(dpp, &s);
+               dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+               if (!s.is_enabled)
+                       continue;
+
+               DTN_INFO("[%2d]:  %8s  %11s  %10s  %15s  %10s  %9s  %12s  "
+                        "%010lld %010lld %010lld %010lld "
+                        "%010lld %010lld %010lld %010lld "
+                        "%010lld %010lld %010lld %010lld",
+                       dpp->inst,
+                       (s.dgam_lut_mode == 0) ? "Bypass" :
+                        ((s.dgam_lut_mode == 1) ? "sRGB" :
+                        ((s.dgam_lut_mode == 2) ? "Ycc" :
+                        ((s.dgam_lut_mode == 3) ? "RAM" :
+                        ((s.dgam_lut_mode == 4) ? "RAM" :
+                                                  "Unknown")))),
+                       (s.shaper_lut_mode == 1) ? "RAM A" :
+                        ((s.shaper_lut_mode == 2) ? "RAM B" :
+                                                    "Bypass"),
+                       (s.lut3d_mode == 1) ? "RAM A" :
+                        ((s.lut3d_mode == 2) ? "RAM B" :
+                                               "Bypass"),
+                       (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+                       (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+                       (s.rgam_lut_mode == 1) ? "RAM A" :
+                        ((s.rgam_lut_mode == 2) ? "RAM B" : "Bypass"),
+                       (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+                        ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+                                                                  "SW"),
+                       s.gamut_remap.temperature_matrix[0].value,
+                       s.gamut_remap.temperature_matrix[1].value,
+                       s.gamut_remap.temperature_matrix[2].value,
+                       s.gamut_remap.temperature_matrix[3].value,
+                       s.gamut_remap.temperature_matrix[4].value,
+                       s.gamut_remap.temperature_matrix[5].value,
+                       s.gamut_remap.temperature_matrix[6].value,
+                       s.gamut_remap.temperature_matrix[7].value,
+                       s.gamut_remap.temperature_matrix[8].value,
+                       s.gamut_remap.temperature_matrix[9].value,
+                       s.gamut_remap.temperature_matrix[10].value,
+                       s.gamut_remap.temperature_matrix[11].value);
+               DTN_INFO("\n");
+       }
+       DTN_INFO("\n");
+       DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
+                "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+                "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
+                "  blnd_lut:%d  oscs:%d\n\n",
+                dc->caps.color.dpp.input_lut_shared,
+                dc->caps.color.dpp.icsc,
+                dc->caps.color.dpp.dgam_ram,
+                dc->caps.color.dpp.dgam_rom_caps.srgb,
+                dc->caps.color.dpp.dgam_rom_caps.bt2020,
+                dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+                dc->caps.color.dpp.dgam_rom_caps.pq,
+                dc->caps.color.dpp.dgam_rom_caps.hlg,
+                dc->caps.color.dpp.post_csc,
+                dc->caps.color.dpp.gamma_corr,
+                dc->caps.color.dpp.dgam_rom_for_yuv,
+                dc->caps.color.dpp.hw_3d_lut,
+                dc->caps.color.dpp.ogam_ram,
+                dc->caps.color.dpp.ocsc);
+
+       DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE"
+                "  OGAM mode\n");
+
+       for (i = 0; i < pool->pipe_count; i++) {
+               struct mpcc_state s = {0};
+
+               pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+               if (s.opp_id != 0xf)
+                       DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d  %9s\n",
+                               i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+                               s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+                               s.idle,
+                               (s.rgam_mode == 1) ? "RAM A" :
+                                ((s.rgam_mode == 2) ? "RAM B" :
+                                                      "Bypass"));
+       }
+       DTN_INFO("\n");
+       DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+                dc->caps.color.mpc.gamut_remap,
+                dc->caps.color.mpc.num_3dluts,
+                dc->caps.color.mpc.ogam_ram,
+                dc->caps.color.mpc.ocsc);
+}
+
+
 static int find_free_gsl_group(const struct dc *dc)
 {
        if (dc->res_pool->gsl_groups.gsl_0 == 0)
@@ -1467,7 +1573,8 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
         * makes this assumption at the moment with how hubp reset is matched to
         * same index mpcc reset.
         */
-       if (old_pipe->stream_res.opp != new_pipe->stream_res.opp)
+       if (old_pipe->stream_res.opp != new_pipe->stream_res.opp ||
+                       old_pipe->stream_res.left_edge_extra_pixel != new_pipe->stream_res.left_edge_extra_pixel)
                new_pipe->update_flags.bits.opp_changed = 1;
        if (old_pipe->stream_res.tg != new_pipe->stream_res.tg)
                new_pipe->update_flags.bits.tg_changed = 1;
@@ -1853,6 +1960,10 @@ static void dcn20_program_pipe(
                        pipe_ctx->stream_res.opp,
                        &pipe_ctx->stream->bit_depth_params,
                        &pipe_ctx->stream->clamping);
+
+               pipe_ctx->stream_res.opp->funcs->opp_program_left_edge_extra_pixel(
+                       pipe_ctx->stream_res.opp,
+                       pipe_ctx->stream_res.left_edge_extra_pixel);
        }
 
        /* Set ABM pipe after other pipe configurations done */
@@ -1958,7 +2069,6 @@ void dcn20_program_front_end_for_ctx(
                                && context->res_ctx.pipe_ctx[i].stream)
                        hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
 
-
        /* Disconnect mpcc */
        for (i = 0; i < dc->res_pool->pipe_count; i++)
                if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -2561,7 +2671,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
                tg->funcs->setup_vertical_interrupt2(tg, start_line);
 }
 
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
                struct dc_state *context)
index b94c85340abff7c02f3ec59025b04c8417d77bd6..5c874f7b0683ed4c208565f63d0cc49fdf63249d 100644 (file)
@@ -28,6 +28,8 @@
 
 #include "hw_sequencer_private.h"
 
+void dcn20_log_color_state(struct dc *dc,
+                          struct dc_log_buffer_ctx *log_ctx);
 bool dcn20_set_blend_lut(
        struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state);
 bool dcn20_set_shaper_3dlut(
@@ -84,6 +86,10 @@ enum dc_status dcn20_enable_stream_timing(
 void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
 void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+               struct dc *dc,
+               struct pipe_ctx *pipe_ctx,
+               struct dc_state *context);
 void dcn20_init_blank(
                struct dc *dc,
                struct timing_generator *tg);
index 8e88dcaf88f5b2b709a95abf9e0673390e27daa5..5c7f380a84f91ecb1a668e4798be6aaf9347a46f 100644 (file)
@@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
 void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
 {
        struct abm *abm = pipe_ctx->stream_res.abm;
-       uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
        struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
        struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+       uint32_t otg_inst;
+
+       if (!abm || !tg || !panel_cntl)
+               return;
+
+       otg_inst = tg->inst;
 
        if (dmcu) {
                dce110_set_pipe(pipe_ctx);
                return;
        }
 
-       if (abm && panel_cntl) {
-               if (abm->funcs && abm->funcs->set_pipe_ex) {
-                       abm->funcs->set_pipe_ex(abm,
+       if (abm->funcs && abm->funcs->set_pipe_ex) {
+               abm->funcs->set_pipe_ex(abm,
                                        otg_inst,
                                        SET_ABM_PIPE_NORMAL,
                                        panel_cntl->inst,
                                        panel_cntl->pwrseq_inst);
-               } else {
-                               dmub_abm_set_pipe(abm, otg_inst,
-                                               SET_ABM_PIPE_NORMAL,
-                                               panel_cntl->inst,
-                                               panel_cntl->pwrseq_inst);
-               }
+       } else {
+               dmub_abm_set_pipe(abm, otg_inst,
+                                 SET_ABM_PIPE_NORMAL,
+                                 panel_cntl->inst,
+                                 panel_cntl->pwrseq_inst);
        }
 }
 
@@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
 {
        struct dc_context *dc = pipe_ctx->stream->ctx;
        struct abm *abm = pipe_ctx->stream_res.abm;
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
        struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+       uint32_t otg_inst;
+
+       if (!abm || !tg || !panel_cntl)
+               return false;
+
+       otg_inst = tg->inst;
 
        if (dc->dc->res_pool->dmcu) {
                dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
                return true;
        }
 
-       if (abm != NULL) {
-               uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
-
-               if (abm && panel_cntl) {
-                       if (abm->funcs && abm->funcs->set_pipe_ex) {
-                               abm->funcs->set_pipe_ex(abm,
-                                               otg_inst,
-                                               SET_ABM_PIPE_NORMAL,
-                                               panel_cntl->inst,
-                                               panel_cntl->pwrseq_inst);
-                       } else {
-                                       dmub_abm_set_pipe(abm,
-                                                       otg_inst,
-                                                       SET_ABM_PIPE_NORMAL,
-                                                       panel_cntl->inst,
-                                                       panel_cntl->pwrseq_inst);
-                       }
-               }
+       if (abm->funcs && abm->funcs->set_pipe_ex) {
+               abm->funcs->set_pipe_ex(abm,
+                                       otg_inst,
+                                       SET_ABM_PIPE_NORMAL,
+                                       panel_cntl->inst,
+                                       panel_cntl->pwrseq_inst);
+       } else {
+               dmub_abm_set_pipe(abm,
+                                 otg_inst,
+                                 SET_ABM_PIPE_NORMAL,
+                                 panel_cntl->inst,
+                                 panel_cntl->pwrseq_inst);
        }
 
-       if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+       if (abm->funcs && abm->funcs->set_backlight_level_pwm)
                abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
                        frame_ramp, 0, panel_cntl->inst);
        else
index c34c13e1e0a4ea918de9a9e36dbe305ce5224485..7e6b7f2a6dc9ea799c3a4b0a23b5d3e4c6d26b17 100644 (file)
 #define FN(reg_name, field_name) \
        hws->shifts->field_name, hws->masks->field_name
 
+void dcn30_log_color_state(struct dc *dc,
+                          struct dc_log_buffer_ctx *log_ctx)
+{
+       struct dc_context *dc_ctx = dc->ctx;
+       struct resource_pool *pool = dc->res_pool;
+       int i;
+
+       DTN_INFO("DPP:  DGAM ROM  DGAM ROM type  DGAM LUT  SHAPER mode"
+                "  3DLUT mode  3DLUT bit depth  3DLUT size  RGAM mode"
+                "  GAMUT adjust  "
+                "C11        C12        C13        C14        "
+                "C21        C22        C23        C24        "
+                "C31        C32        C33        C34        \n");
+
+       for (i = 0; i < pool->pipe_count; i++) {
+               struct dpp *dpp = pool->dpps[i];
+               struct dcn_dpp_state s = {0};
+
+               dpp->funcs->dpp_read_state(dpp, &s);
+               dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+
+               if (!s.is_enabled)
+                       continue;
+
+               DTN_INFO("[%2d]:  %7x  %13s  %8s  %11s  %10s  %15s  %10s  %9s"
+                        "  %12s  "
+                        "%010lld %010lld %010lld %010lld "
+                        "%010lld %010lld %010lld %010lld "
+                        "%010lld %010lld %010lld %010lld",
+                       dpp->inst,
+                       s.pre_dgam_mode,
+                       (s.pre_dgam_select == 0) ? "sRGB" :
+                        ((s.pre_dgam_select == 1) ? "Gamma 2.2" :
+                        ((s.pre_dgam_select == 2) ? "Gamma 2.4" :
+                        ((s.pre_dgam_select == 3) ? "Gamma 2.6" :
+                        ((s.pre_dgam_select == 4) ? "BT.709" :
+                        ((s.pre_dgam_select == 5) ? "PQ" :
+                        ((s.pre_dgam_select == 6) ? "HLG" :
+                                                    "Unknown")))))),
+                       (s.gamcor_mode == 0) ? "Bypass" :
+                        ((s.gamcor_mode == 1) ? "RAM A" :
+                                                "RAM B"),
+                       (s.shaper_lut_mode == 1) ? "RAM A" :
+                        ((s.shaper_lut_mode == 2) ? "RAM B" :
+                                                    "Bypass"),
+                       (s.lut3d_mode == 1) ? "RAM A" :
+                        ((s.lut3d_mode == 2) ? "RAM B" :
+                                               "Bypass"),
+                       (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+                       (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+                       (s.rgam_lut_mode == 0) ? "Bypass" :
+                        ((s.rgam_lut_mode == 1) ? "RAM A" :
+                                                  "RAM B"),
+                       (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+                               ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+                                                                         "SW"),
+                       s.gamut_remap.temperature_matrix[0].value,
+                       s.gamut_remap.temperature_matrix[1].value,
+                       s.gamut_remap.temperature_matrix[2].value,
+                       s.gamut_remap.temperature_matrix[3].value,
+                       s.gamut_remap.temperature_matrix[4].value,
+                       s.gamut_remap.temperature_matrix[5].value,
+                       s.gamut_remap.temperature_matrix[6].value,
+                       s.gamut_remap.temperature_matrix[7].value,
+                       s.gamut_remap.temperature_matrix[8].value,
+                       s.gamut_remap.temperature_matrix[9].value,
+                       s.gamut_remap.temperature_matrix[10].value,
+                       s.gamut_remap.temperature_matrix[11].value);
+               DTN_INFO("\n");
+       }
+       DTN_INFO("\n");
+       DTN_INFO("DPP Color Caps: input_lut_shared:%d  icsc:%d"
+                "  dgam_ram:%d  dgam_rom: srgb:%d,bt2020:%d,gamma2_2:%d,pq:%d,hlg:%d"
+                "  post_csc:%d  gamcor:%d  dgam_rom_for_yuv:%d  3d_lut:%d"
+                "  blnd_lut:%d  oscs:%d\n\n",
+                dc->caps.color.dpp.input_lut_shared,
+                dc->caps.color.dpp.icsc,
+                dc->caps.color.dpp.dgam_ram,
+                dc->caps.color.dpp.dgam_rom_caps.srgb,
+                dc->caps.color.dpp.dgam_rom_caps.bt2020,
+                dc->caps.color.dpp.dgam_rom_caps.gamma2_2,
+                dc->caps.color.dpp.dgam_rom_caps.pq,
+                dc->caps.color.dpp.dgam_rom_caps.hlg,
+                dc->caps.color.dpp.post_csc,
+                dc->caps.color.dpp.gamma_corr,
+                dc->caps.color.dpp.dgam_rom_for_yuv,
+                dc->caps.color.dpp.hw_3d_lut,
+                dc->caps.color.dpp.ogam_ram,
+                dc->caps.color.dpp.ocsc);
+
+       DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE"
+                "  SHAPER mode  3DLUT mode  3DLUT bit-depth  3DLUT size  OGAM mode  OGAM LUT"
+                "  GAMUT adjust  "
+                "C11        C12        C13        C14        "
+                "C21        C22        C23        C24        "
+                "C31        C32        C33        C34        \n");
+
+       for (i = 0; i < pool->pipe_count; i++) {
+               struct mpcc_state s = {0};
+
+               pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
+               mpc3_get_gamut_remap(pool->mpc, i,  &s.gamut_remap);
+
+               if (s.opp_id != 0xf)
+                       DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d  %11s %11s %16s %11s %10s %9s"
+                                "  %-12s  "
+                                "%010lld %010lld %010lld %010lld "
+                                "%010lld %010lld %010lld %010lld "
+                                "%010lld %010lld %010lld %010lld\n",
+                               i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
+                               s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
+                               s.idle,
+                               (s.shaper_lut_mode == 1) ? "RAM A" :
+                                ((s.shaper_lut_mode == 2) ? "RAM B" :
+                                                            "Bypass"),
+                               (s.lut3d_mode == 1) ? "RAM A" :
+                                ((s.lut3d_mode == 2) ? "RAM B" :
+                                                       "Bypass"),
+                               (s.lut3d_bit_depth <= 0) ? "12-bit" : "10-bit",
+                               (s.lut3d_size == 0) ? "17x17x17" : "9x9x9",
+                               (s.rgam_mode == 0) ? "Bypass" :
+                                ((s.rgam_mode == 2) ? "RAM" :
+                                                      "Unknown"),
+                               (s.rgam_mode == 1) ? "B" : "A",
+                               (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+                                       ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
+                                                                                 "SW"),
+                               s.gamut_remap.temperature_matrix[0].value,
+                               s.gamut_remap.temperature_matrix[1].value,
+                               s.gamut_remap.temperature_matrix[2].value,
+                               s.gamut_remap.temperature_matrix[3].value,
+                               s.gamut_remap.temperature_matrix[4].value,
+                               s.gamut_remap.temperature_matrix[5].value,
+                               s.gamut_remap.temperature_matrix[6].value,
+                               s.gamut_remap.temperature_matrix[7].value,
+                               s.gamut_remap.temperature_matrix[8].value,
+                               s.gamut_remap.temperature_matrix[9].value,
+                               s.gamut_remap.temperature_matrix[10].value,
+                               s.gamut_remap.temperature_matrix[11].value);
+
+       }
+       DTN_INFO("\n");
+       DTN_INFO("MPC Color Caps: gamut_remap:%d, 3dlut:%d, ogam_ram:%d, ocsc:%d\n\n",
+                dc->caps.color.mpc.gamut_remap,
+                dc->caps.color.mpc.num_3dluts,
+                dc->caps.color.mpc.ogam_ram,
+                dc->caps.color.mpc.ocsc);
+}
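
For reference, the pre_dgam_select decode used in the DPP loop above, written out flat (it mirrors the nested ternary chain in the format arguments):

	/*
	 * s.pre_dgam_select: 0 -> sRGB, 1 -> Gamma 2.2, 2 -> Gamma 2.4,
	 * 3 -> Gamma 2.6, 4 -> BT.709, 5 -> PQ, 6 -> HLG, else "Unknown"
	 */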
+
 bool dcn30_set_blend_lut(
        struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
 {
@@ -1015,21 +1164,3 @@ void dcn30_prepare_bandwidth(struct dc *dc,
        if (!dc->clk_mgr->clks.fw_based_mclk_switching)
                dc_dmub_srv_p_state_delegate(dc, false, context);
 }
-
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
-               int num_pipes, const struct dc_static_screen_params *params)
-{
-       unsigned int i;
-       unsigned int triggers = 0;
-
-       if (params->triggers.surface_update)
-               triggers |= 0x100;
-       if (params->triggers.cursor_update)
-               triggers |= 0x8;
-       if (params->triggers.force_trigger)
-               triggers |= 0x1;
-
-       for (i = 0; i < num_pipes; i++)
-               pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
-                                       triggers, params->num_frames);
-}
index e557e2b9861870bb17ed1ac70f73e636888528d1..638f018a3cb57d17a935c37f27e228dd54a3d07e 100644 (file)
@@ -52,6 +52,9 @@ bool dcn30_mmhubbub_warmup(
        unsigned int num_dwb,
        struct dc_writeback_info *wb_info);
 
+void dcn30_log_color_state(struct dc *dc,
+                          struct dc_log_buffer_ctx *log_ctx);
+
 bool dcn30_set_blend_lut(struct pipe_ctx *pipe_ctx,
                const struct dc_plane_state *plane_state);
 
@@ -90,7 +93,4 @@ void dcn30_set_hubp_blank(const struct dc *dc,
 void dcn30_prepare_bandwidth(struct dc *dc,
        struct dc_state *context);
 
-void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
-               int num_pipes, const struct dc_static_screen_params *params);
-
 #endif /* __DC_HWSS_DCN30_H__ */
index 9894caedffed7393e04639ad3adda8c3defe0f6d..ef913445a79573cee099460a1d44a4c27092d945 100644 (file)
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn10_set_drr,
        .get_position = dcn10_get_position,
-       .set_static_screen_control = dcn30_set_static_screen_control,
+       .set_static_screen_control = dcn10_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
        .set_avmute = dcn30_set_avmute,
        .log_hw_state = dcn10_log_hw_state,
index 7423880fabb6e3b0d1a1003bab1fa8ff4898a936..a760f0c6fe98f22536abe25ebcdde5445fa8399f 100644 (file)
@@ -98,10 +98,8 @@ static void enable_memory_low_power(struct dc *dc)
                for (i = 0; i < dc->res_pool->stream_enc_count; i++)
                        if (dc->res_pool->stream_enc[i]->vpg)
                                dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
-#if defined(CONFIG_DRM_AMD_DC_FP)
                for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
                        dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
-#endif
        }
 
 }
@@ -617,3 +615,21 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
        if (hws->ctx->dc->debug.hpo_optimization)
                REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
 }
+
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+               int num_pipes, const struct dc_static_screen_params *params)
+{
+       unsigned int i;
+       unsigned int triggers = 0;
+
+       if (params->triggers.surface_update)
+               triggers |= 0x100;
+       if (params->triggers.cursor_update)
+               triggers |= 0x8;
+       if (params->triggers.force_trigger)
+               triggers |= 0x1;
+
+       for (i = 0; i < num_pipes; i++)
+               pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+                                       triggers, params->num_frames);
+}
index edfc01d6ad7378be5873ec2f6b72a63c2991320f..b8bc939da1554fcb9d36bd9ae0ae110523761440 100644 (file)
@@ -56,4 +56,7 @@ bool dcn31_is_abm_supported(struct dc *dc,
 void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
 void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
 
+void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+               int num_pipes, const struct dc_static_screen_params *params);
+
 #endif /* __DC_HWSS_DCN31_H__ */
index 669f524bd064d50cc24f87d0b0585d215ce99639..c06cc2c5da920f457c1ba422a7ffa8f59db9d6c0 100644 (file)
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn10_set_drr,
        .get_position = dcn10_get_position,
-       .set_static_screen_control = dcn30_set_static_screen_control,
+       .set_static_screen_control = dcn31_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
        .set_avmute = dcn30_set_avmute,
        .log_hw_state = dcn10_log_hw_state,
index ccb7e317e86af1b2ea8c0a5669a05a18612c2aee..542ce3b7f9e4d12d4bf7c6fcab2b4ce05c01f4f5 100644 (file)
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn10_set_drr,
        .get_position = dcn10_get_position,
-       .set_static_screen_control = dcn30_set_static_screen_control,
+       .set_static_screen_control = dcn31_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
        .set_avmute = dcn30_set_avmute,
        .log_hw_state = dcn10_log_hw_state,
index 6c9299c7683df19b3c444b865d297182d91ae7b3..aa36d7a56ca8c3b6f3cd47e67455ba67549bf73b 100644 (file)
@@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
        }
 }
 
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+       struct dce_hwseq *hws = dc->hwseq;
+       int i;
+
+       for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+               struct pipe_ctx *pipe_ctx_old =
+                       &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (!pipe_ctx_old->stream)
+                       continue;
+
+               if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+                       continue;
+
+               if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+                       continue;
+
+               if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+                               (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+                       struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+                       if (hws->funcs.reset_back_end_for_pipe)
+                               hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+                       if (hws->funcs.enable_stream_gating)
+                               hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+                       if (old_clk)
+                               old_clk->funcs->cs_power_down(old_clk);
+               }
+       }
+}
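
In summary (an illustrative trace of the loop above, not new behavior): a pipe that carried a phantom stream in the current state is torn down when the new state drops its stream, needs a reprogram, or no longer marks it phantom; the hooks then run in this order:

	/*
	 * old state: pipe i -> phantom stream, clock source CS
	 * new state: pipe i -> no stream, reprogram needed, or non-phantom
	 *
	 *   hws->funcs.reset_back_end_for_pipe(dc, old_pipe, dc->current_state);
	 *   hws->funcs.enable_stream_gating(dc, old_pipe);
	 *   CS->funcs->cs_power_down(CS);
	 */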
+
 void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
 {
        unsigned int i;
+       enum dc_status status = DC_OK;
+       struct dce_hwseq *hws = dc->hwseq;
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
                }
        }
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
-               struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
-               if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
-                       // If old context or new context has phantom pipes, apply
-                       // the phantom timings now. We can't change the phantom
-                       // pipe configuration safely without driver acquiring
-                       // the DMCUB lock first.
-                       dc->hwss.apply_ctx_to_hw(dc, context);
-                       break;
+               struct pipe_ctx *pipe_ctx_old =
+                                       &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+               if (pipe_ctx->stream == NULL)
+                       continue;
+
+               if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+                       continue;
+
+               if (pipe_ctx->stream == pipe_ctx_old->stream &&
+                       pipe_ctx->stream->link->link_state_valid) {
+                       continue;
                }
+
+               if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+                       continue;
+
+               if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+                       continue;
+
+               if (hws->funcs.apply_single_controller_ctx_to_hw)
+                       status = hws->funcs.apply_single_controller_ctx_to_hw(
+                                       pipe_ctx,
+                                       context,
+                                       dc);
+
+               ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+               if (hws->funcs.resync_fifo_dccg_dio)
+                       hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
        }
 }
 
index cecf7f0f567190b257cf81e5f756b5a916eba09c..069e20bc87c0a75af028168253219fc9343b1af3 100644 (file)
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
 
 void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
 
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
 void dcn32_init_blank(
                struct dc *dc,
                struct timing_generator *tg);
index 427cfc8c24a4b7ed4cee1f0b6955cbe371797219..2b073123d3ede2eb16ae766af9a285511fc6e8ea 100644 (file)
@@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn10_set_drr,
        .get_position = dcn10_get_position,
-       .set_static_screen_control = dcn30_set_static_screen_control,
+       .set_static_screen_control = dcn31_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
        .set_avmute = dcn30_set_avmute,
        .log_hw_state = dcn10_log_hw_state,
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
        .commit_subvp_config = dcn32_commit_subvp_config,
        .enable_phantom_streams = dcn32_enable_phantom_streams,
+       .disable_phantom_streams = dcn32_disable_phantom_streams,
        .subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
        .update_visual_confirm_color = dcn10_update_visual_confirm_color,
        .subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
        .set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
        .resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
        .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+       .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+       .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
 };
 
 void dcn32_hw_sequencer_init_functions(struct dc *dc)
index 8b6c49622f3b63c8e6dae68c507e1e45c5a736a2..4b92df23ff0db90498e722c0df0d8bbb149e76e2 100644 (file)
@@ -1342,8 +1342,8 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
 {
        int i = 0;
        struct drr_params params = {0};
-       // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
-       unsigned int event_triggers = 0x800;
+       // DRR set trigger event mapped to OTG_TRIG_A
+       unsigned int event_triggers = 0x2; // Bit[1]: OTG_TRIG_A
        // Note DRR trigger events are generated regardless of whether num frames met.
        unsigned int num_frames = 2;
 
@@ -1377,3 +1377,20 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
                }
        }
 }
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+               int num_pipes, const struct dc_static_screen_params *params)
+{
+       unsigned int i;
+       unsigned int triggers = 0;
+
+       if (params->triggers.surface_update)
+               triggers |= 0x200; /* bit 9: 10 0000 0000 */
+       if (params->triggers.cursor_update)
+               triggers |= 0x8; /* bit 3 */
+       if (params->triggers.force_trigger)
+               triggers |= 0x1;
+       for (i = 0; i < num_pipes; i++)
+               pipe_ctx[i]->stream_res.tg->funcs->
+                       set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+                                       triggers, params->num_frames);
+}
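
A worked example contrasting the two implementations (parameter values chosen purely for illustration): the surface-update event moved from bit 8 on DCN3.0/3.1 to bit 9 on DCN3.5, so the same dc_static_screen_params yield different masks:

	/*
	 * params.triggers.surface_update = 1;
	 * params.triggers.cursor_update  = 1;
	 * params.triggers.force_trigger  = 0;
	 *
	 * dcn31_set_static_screen_control(): triggers = 0x100 | 0x8 = 0x108
	 * dcn35_set_static_screen_control(): triggers = 0x200 | 0x8 = 0x208
	 */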
index fd66316e33de367da8c90e3520087fce385ebb5b..c354efa6c1b2f8f6e69754553f8ac905ffab8936 100644 (file)
@@ -90,4 +90,7 @@ uint32_t dcn35_get_idle_state(const struct dc *dc);
 void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
                int num_pipes, struct dc_crtc_timing_adjust adjust);
 
+void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+               int num_pipes, const struct dc_static_screen_params *params);
+
 #endif /* __DC_HWSS_DCN35_H__ */
index a630aa77dcec036c791c3ae4a75a2e3bedafe2f8..a93073055e7bfc976f6c9c4a2885a773a26ccf24 100644 (file)
@@ -70,7 +70,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn35_set_drr,
        .get_position = dcn10_get_position,
-       .set_static_screen_control = dcn30_set_static_screen_control,
+       .set_static_screen_control = dcn35_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
        .set_avmute = dcn30_set_avmute,
        .log_hw_state = dcn10_log_hw_state,
index 143d3fc0221cf88b44de1965587fc5578c66ea07..ab17fa1c64e8c5b405ae2f24a93c1ee54abefe10 100644 (file)
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
        .update_bandwidth = dcn20_update_bandwidth,
        .set_drr = dcn10_set_drr,
        .get_position = dcn10_get_position,
-       .set_static_screen_control = dcn30_set_static_screen_control,
+       .set_static_screen_control = dcn35_set_static_screen_control,
        .setup_stereo = dcn10_setup_stereo,
        .set_avmute = dcn30_set_avmute,
        .log_hw_state = dcn10_log_hw_state,
index a54399383318145b8bc72fc85e646bf546588609..f89f205e42a1a8ce0da54a83cdb3a3d2033aa3a6 100644 (file)
@@ -339,6 +339,8 @@ struct hw_sequencer_funcs {
 
        /* HW State Logging Related */
        void (*log_hw_state)(struct dc *dc, struct dc_log_buffer_ctx *log_ctx);
+       void (*log_color_state)(struct dc *dc,
+                               struct dc_log_buffer_ctx *log_ctx);
        void (*get_hw_state)(struct dc *dc, char *pBuf,
                        unsigned int bufSize, unsigned int mask);
        void (*clear_status_bits)(struct dc *dc, unsigned int mask);
@@ -379,6 +381,7 @@ struct hw_sequencer_funcs {
                        struct dc_cursor_attributes *cursor_attr);
        void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
        void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+       void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
        void (*subvp_pipe_control_lock)(struct dc *dc,
                        struct dc_state *context,
                        bool lock,
index 6137cf09aa54d25750246e86583c5938e557501b..554cfab5ab24a5be6b98b41acf19ffe3c023b620 100644 (file)
@@ -155,7 +155,6 @@ struct hwseq_private_funcs {
        void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable);
        void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
                               struct dc_state *context);
-#ifdef CONFIG_DRM_AMD_DC_FP
        void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
        void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
        void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
@@ -165,8 +164,14 @@ struct hwseq_private_funcs {
        void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
        void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
                        struct dc_state *context);
+       enum dc_status (*apply_single_controller_ctx_to_hw)(
+                       struct pipe_ctx *pipe_ctx,
+                       struct dc_state *context,
+                       struct dc *dc);
        bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
-#endif
+       void (*reset_back_end_for_pipe)(struct dc *dc,
+                       struct pipe_ctx *pipe_ctx,
+                       struct dc_state *context);
 };
 
 struct dce_hwseq {
index 3a6bf77a68732166d320dbea642929c3201d3e01..ebb659c327e06f7a3baf6e975b7f745536441920 100644 (file)
@@ -333,6 +333,8 @@ struct stream_resource {
        uint8_t gsl_group;
 
        struct test_pattern_params test_pattern_params;
+
+       bool left_edge_extra_pixel;
 };
 
 struct plane_resource {
index 6ed1fb8c930009a95842355af2181e8413999e6e..b6203253111cab1b573300f42ede8788e63ef658 100644 (file)
@@ -43,7 +43,8 @@ struct audio_funcs {
        void (*az_configure)(struct audio *audio,
                enum signal_type signal,
                const struct audio_crtc_info *crtc_info,
-               const struct audio_info *audio_info);
+               const struct audio_info *audio_info,
+               const struct audio_dp_link_info *dp_link_info);
 
        void (*wall_dto_setup)(struct audio *audio,
                enum signal_type signal,
index 6f4c97543c145fd75c9fbdccbd681dd4c14ad727..f4d4a68c91dc7d8bff2db9aff6c6429691081e2b 100644 (file)
@@ -356,6 +356,7 @@ struct clk_mgr_internal {
        long long wm_range_table_addr;
 
        bool dpm_present;
+       bool pme_trigger_pending;
 };
 
 struct clk_mgr_internal_funcs {
@@ -393,6 +394,11 @@ static inline int khz_to_mhz_ceil(int khz)
        return (khz + 999) / 1000;
 }
 
+static inline int khz_to_mhz_floor(int khz)
+{
+       return khz / 1000;
+}
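
A quick sanity check of the two integer conversion helpers (arbitrary values):

	/*
	 * khz_to_mhz_ceil(25500)  = (25500 + 999) / 1000 = 26
	 * khz_to_mhz_floor(25500) =  25500 / 1000        = 25
	 * For exact multiples both agree: 26000 kHz -> 26 MHz either way.
	 */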
+
 int clk_mgr_helper_get_active_display_cnt(
                struct dc *dc,
                struct dc_state *context);
index 901891316dfbf16fc3b1b598ad906e9ad8465c5f..2ae7484d18afb0f6f705015f2b81dc7c6b67d256 100644 (file)
 #ifndef __DAL_DCHUBBUB_H__
 #define __DAL_DCHUBBUB_H__
 
+/**
+ * DOC: overview
+ *
+ * There is only one common DCHUBBUB. It contains the common request and return
+ * blocks for the Data Fabric Interface that are not clock/power gated.
+ */
 
 enum dcc_control {
        dcc_control__256_256_xxx,
index f4aa76e02518549bafbf9965164c13aab32d0618..0f24afbf43883b39ff9aad7a1f273b6381a3971c 100644 (file)
 #ifndef __DAL_DPP_H__
 #define __DAL_DPP_H__
 
+/**
+ * DOC: overview
+ *
+ * The DPP (Display Pipe and Plane) block is the unified display data
+ * processing engine in DCN. It processes graphic or video data on a per-DPP
+ * rectangle basis; the rectangle can be a part of an SLS (Single Large
+ * Surface), a layer to be blended with other DPPs, or a rectangle associated
+ * with a display tile.
+ *
+ * It provides various functions including:
+ * - graphic color keyer
+ * - graphic cursor compositing
+ * - graphic or video image source to destination scaling
+ * - image sharpening
+ * - video format conversion from 4:2:0 or 4:2:2 to 4:4:4
+ * - Color Space Conversion
+ * - Host LUT gamma adjustment
+ * - Color Gamut Remap
+ * - brightness and contrast adjustment.
+ *
+ * A DPP pipe consists of the Converter and Cursor (CNVC), Scaler (DSCL),
+ * Color Management (CM), Output Buffer (OBUF) and Digital Bypass (DPB)
+ * modules connected in a video/graphics pipeline.
+ */
+
 #include "transform.h"
 #include "cursor_reg_cache.h"
 
@@ -141,6 +166,7 @@ struct dcn_dpp_state {
        uint32_t igam_input_format;
        uint32_t dgam_lut_mode;
        uint32_t rgam_lut_mode;
+       // gamut_remap data for dcn10_get_cm_states()
        uint32_t gamut_remap_mode;
        uint32_t gamut_remap_c11_c12;
        uint32_t gamut_remap_c13_c14;
@@ -148,6 +174,16 @@ struct dcn_dpp_state {
        uint32_t gamut_remap_c23_c24;
        uint32_t gamut_remap_c31_c32;
        uint32_t gamut_remap_c33_c34;
+       // gamut_remap data for dcn*_log_color_state()
+       struct dpp_grph_csc_adjustment gamut_remap;
+       uint32_t shaper_lut_mode;
+       uint32_t lut3d_mode;
+       uint32_t lut3d_bit_depth;
+       uint32_t lut3d_size;
+       uint32_t blnd_lut_mode;
+       uint32_t pre_dgam_mode;
+       uint32_t pre_dgam_select;
+       uint32_t gamcor_mode;
 };
 
 struct CM_bias_params {
@@ -290,6 +326,9 @@ struct dpp_funcs {
        void (*dpp_cnv_set_alpha_keyer)(
                        struct dpp *dpp_base,
                        struct cnv_color_keyer_params *color_keyer);
+
+       void (*dpp_get_gamut_remap)(struct dpp *dpp_base,
+                                   struct dpp_grph_csc_adjustment *adjust);
 };
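
Paired with the hwseq changes above, a minimal sketch of how a caller can snapshot one DPP's color state through the new hook (the NULL check is defensive, and dpp is assumed to be a valid entry from the resource pool):

	struct dcn_dpp_state s = {0};

	/* generic DPP state first, then the new gamut-remap snapshot */
	dpp->funcs->dpp_read_state(dpp, &s);
	if (dpp->funcs->dpp_get_gamut_remap)
		dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);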
 
 
index 7f3f9b69e903a186fcc864963ece237403d3e88a..72610cd7eae0befbd83a05e385e4a9e5fcb32ef4 100644 (file)
 #ifndef __DAL_HUBP_H__
 #define __DAL_HUBP_H__
 
+/**
+ * DOC: overview
+ *
+ * Display Controller Hub (DCHUB) is the gateway between the Scalable Data Port
+ * (SDP) and DCN. This component has multiple features, such as memory
+ * arbitration, rotation, and cursor manipulation.
+ *
+ * There is one HUBP allocated per pipe, which fetches data and converts
+ * different pixel formats (e.g. ARGB8888, NV12) into linear, interleaved
+ * and fixed-depth streams of pixel data.
+ */
+
 #include "mem_input.h"
 #include "cursor_reg_cache.h"
 
 #define OPP_ID_INVALID 0xf
 #define MAX_TTU 0xffffff
 
-
 enum cursor_pitch {
        CURSOR_PITCH_64_PIXELS = 0,
        CURSOR_PITCH_128_PIXELS,
@@ -146,9 +157,7 @@ struct hubp_funcs {
 
        void (*set_blank)(struct hubp *hubp, bool blank);
        void (*set_blank_regs)(struct hubp *hubp, bool blank);
-#ifdef CONFIG_DRM_AMD_DC_FP
        void (*phantom_hubp_post_enable)(struct hubp *hubp);
-#endif
        void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
 
        void (*set_cursor_attributes)(
index 61a2406dcc53937ff38ce7996c761a1c82cb9deb..ba9b942ce09f9ade4e705a86631159b2190f4413 100644 (file)
  */
 
 /**
- * DOC: mpc-overview
+ * DOC: overview
  *
- * Multiple Pipe/Plane Combined (MPC) is a component in the hardware pipeline
+ * Multiple Pipe/Plane Combiner (MPC) is a component in the hardware pipeline
  * that performs blending of multiple planes, using global and per-pixel alpha.
  * It also performs post-blending color correction operations according to the
  * hardware capabilities, such as color transformation matrix and gamma 1D and
  * 3D LUT.
+ *
+ * MPC receives output from all DPP pipes and combines them to multiple outputs
+ * supporting "M MPC inputs -> N MPC outputs" flexible composition
+ * architecture. It features:
+ *
+ * - Programmable blending structure to allow software controlled blending and
+ *   cascading;
+ * - Programmable window location of each DPP in the active region of the display;
+ * - Combining multiple DPP pipes in one active region when a single DPP pipe
+ *   cannot process a very large surface;
+ * - Combining multiple DPPs from different SLSs with blending;
+ * - Stereo formats from single DPP in top-bottom or side-by-side modes;
+ * - Stereo formats from 2 DPPs;
+ * - Alpha blending of multiple layers from different DPP pipes;
+ * - Programmable background color;
  */
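
As a reading aid for the tree structure documented below, a minimal sketch (a hypothetical helper, not part of DC) that walks a blending chain from the top layer down via mpcc_bot:

	static int count_blending_layers(const struct mpc_tree *tree)
	{
		const struct mpcc *layer = tree->opp_list;
		int n = 0;

		while (layer) {
			n++;				/* one DPP input blended at this layer */
			layer = layer->mpcc_bot;	/* NULL at the bottom of the tree */
		}
		return n;
	}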
 
 #ifndef __DC_MPCC_H__
@@ -83,34 +98,66 @@ enum mpcc_alpha_blend_mode {
 
 /**
  * struct mpcc_blnd_cfg - MPCC blending configuration
- *
- * @black_color: background color
- * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE)
- * @pre_multiplied_alpha: whether pixel color values were pre-multiplied by the
- * alpha channel (MPCC_ALPHA_MULTIPLIED_MODE)
- * @global_gain: used when blend mode considers both pixel alpha and plane
- * alpha value and assumes the global alpha value.
- * @global_alpha: plane alpha value
- * @overlap_only: whether overlapping of different planes is allowed
- * @bottom_gain_mode: blend mode for bottom gain setting
- * @background_color_bpc: background color for bpc
- * @top_gain: top gain setting
- * @bottom_inside_gain: blend mode for bottom inside
- * @bottom_outside_gain:  blend mode for bottom outside
  */
 struct mpcc_blnd_cfg {
-       struct tg_color black_color;    /* background color */
-       enum mpcc_alpha_blend_mode alpha_mode;  /* alpha blend mode */
-       bool pre_multiplied_alpha;      /* alpha pre-multiplied mode flag */
+       /**
+        * @black_color: background color.
+        */
+       struct tg_color black_color;
+
+       /**
+        * @alpha_mode: alpha blend mode (MPCC_ALPHA_BLND_MODE).
+        */
+       enum mpcc_alpha_blend_mode alpha_mode;
+
+       /**
+        * @pre_multiplied_alpha:
+        *
+        * Whether pixel color values were pre-multiplied by the alpha channel
+        * (MPCC_ALPHA_MULTIPLIED_MODE).
+        */
+       bool pre_multiplied_alpha;
+
+       /**
+        * @global_gain: Used when blend mode considers both pixel alpha and plane alpha value.
+        */
        int global_gain;
+
+       /**
+        * @global_alpha: Plane alpha value.
+        */
        int global_alpha;
+
+       /**
+        * @overlap_only: Whether overlapping of different planes is allowed.
+        */
        bool overlap_only;
 
        /* MPCC top/bottom gain settings */
+
+       /**
+        * @bottom_gain_mode: Blend mode for bottom gain setting.
+        */
        int bottom_gain_mode;
+
+       /**
+        * @background_color_bpc: Background color for bpc.
+        */
        int background_color_bpc;
+
+       /**
+        * @top_gain: Top gain setting.
+        */
        int top_gain;
+
+       /**
+        * @bottom_inside_gain: Blend mode for bottom inside.
+        */
        int bottom_inside_gain;
+
+       /**
+        * @bottom_outside_gain: Blend mode for bottom outside.
+        */
        int bottom_outside_gain;
 };
 
@@ -150,34 +197,58 @@ struct mpc_dwb_flow_control {
 
 /**
  * struct mpcc - MPCC connection and blending configuration for a single MPCC instance.
- * @mpcc_id: MPCC physical instance
- * @dpp_id: DPP input to this MPCC
- * @mpcc_bot: pointer to bottom layer MPCC. NULL when not connected.
- * @blnd_cfg: the blending configuration for this MPCC
- * @sm_cfg: stereo mix setting for this MPCC
- * @shared_bottom: if MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
  *
  * This struct is used as a node in an MPC tree.
  */
 struct mpcc {
-       int mpcc_id;                    /* MPCC physical instance */
-       int dpp_id;                     /* DPP input to this MPCC */
-       struct mpcc *mpcc_bot;          /* pointer to bottom layer MPCC.  NULL when not connected */
-       struct mpcc_blnd_cfg blnd_cfg;  /* The blending configuration for this MPCC */
-       struct mpcc_sm_cfg sm_cfg;      /* stereo mix setting for this MPCC */
-       bool shared_bottom;             /* TRUE if MPCC output to both OPP and DWB endpoints, else FALSE */
+       /**
+        * @mpcc_id: MPCC physical instance.
+        */
+       int mpcc_id;
+
+       /**
+        * @dpp_id: DPP input to this MPCC.
+        */
+       int dpp_id;
+
+       /**
+        * @mpcc_bot: Pointer to bottom layer MPCC. NULL when not connected.
+        */
+       struct mpcc *mpcc_bot;
+
+       /**
+        * @blnd_cfg: The blending configuration for this MPCC.
+        */
+       struct mpcc_blnd_cfg blnd_cfg;
+
+       /**
+        * @sm_cfg: Stereo mix setting for this MPCC.
+        */
+       struct mpcc_sm_cfg sm_cfg;
+
+       /**
+        * @shared_bottom:
+        *
+        * If MPCC output to both OPP and DWB endpoints, true. Otherwise, false.
+        */
+       bool shared_bottom;
 };
 
 /**
  * struct mpc_tree - MPC tree represents all MPCC connections for a pipe.
  *
- * @opp_id: the OPP instance that owns this MPC tree
- * @opp_list: the top MPCC layer of the MPC tree that outputs to OPP endpoint
  *
  */
 struct mpc_tree {
-       int opp_id;                     /* The OPP instance that owns this MPC tree */
-       struct mpcc *opp_list;          /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
+       /**
+        * @opp_id: The OPP instance that owns this MPC tree.
+        */
+       int opp_id;
+
+       /**
+        * @opp_list: The top MPCC layer of the MPC tree that outputs to the OPP endpoint.
+        */
+       struct mpcc *opp_list;
 };
 
 struct mpc {
@@ -199,6 +270,13 @@ struct mpcc_state {
        uint32_t overlap_only;
        uint32_t idle;
        uint32_t busy;
+       uint32_t shaper_lut_mode;
+       uint32_t lut3d_mode;
+       uint32_t lut3d_bit_depth;
+       uint32_t lut3d_size;
+       uint32_t rgam_mode;
+       uint32_t rgam_lut;
+       struct mpc_grph_gamut_adjustment gamut_remap;
 };
 
 /**
@@ -217,16 +295,20 @@ struct mpc_funcs {
         * Only used for planes that are part of blending chain for OPP output
         *
         * Parameters:
-        * [in/out] mpc         - MPC context.
-        * [in/out] tree        - MPC tree structure that plane will be added to.
-        * [in] blnd_cfg        - MPCC blending configuration for the new blending layer.
-        * [in] sm_cfg          - MPCC stereo mix configuration for the new blending layer.
-        *                        stereo mix must disable for the very bottom layer of the tree config.
-        * [in] insert_above_mpcc - Insert new plane above this MPCC.  If NULL, insert as bottom plane.
-        * [in] dpp_id           - DPP instance for the plane to be added.
-        * [in] mpcc_id          - The MPCC physical instance to use for blending.
-        *
-        * Return:  struct mpcc* - MPCC that was added.
+        *
+        * - [in/out] mpc  - MPC context.
+        * - [in/out] tree - MPC tree structure that plane will be added to.
+        * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+        * - [in] sm_cfg   - MPCC stereo mix configuration for the new blending layer.
+        *                   stereo mix must be disabled for the very bottom layer of the tree config.
+        * - [in] insert_above_mpcc - Insert new plane above this MPCC.
+        *                          If NULL, insert as bottom plane.
+        * - [in] dpp_id  - DPP instance for the plane to be added.
+        * - [in] mpcc_id - The MPCC physical instance to use for blending.
+        *
+        * Return:
+        *
+        * struct mpcc* - MPCC that was added.
         */
        struct mpcc* (*insert_plane)(
                        struct mpc *mpc,
@@ -243,11 +325,14 @@ struct mpc_funcs {
         * Remove a specified MPCC from the MPC tree.
         *
         * Parameters:
-        * [in/out] mpc         - MPC context.
-        * [in/out] tree        - MPC tree structure that plane will be removed from.
-        * [in/out] mpcc        - MPCC to be removed from tree.
         *
-        * Return:  void
+        * - [in/out] mpc   - MPC context.
+        * - [in/out] tree  - MPC tree structure that plane will be removed from.
+        * - [in/out] mpcc  - MPCC to be removed from tree.
+        *
+        * Return:
+        *
+        * void
         */
        void (*remove_mpcc)(
                        struct mpc *mpc,
@@ -260,9 +345,12 @@ struct mpc_funcs {
         * Reset the MPCC HW status by disconnecting all muxes.
         *
         * Parameters:
-        * [in/out] mpc         - MPC context.
         *
-        * Return:  void
+        * - [in/out] mpc - MPC context.
+        *
+        * Return:
+        *
+        * void
         */
        void (*mpc_init)(struct mpc *mpc);
        void (*mpc_init_single_inst)(
@@ -275,11 +363,14 @@ struct mpc_funcs {
         * Update the blending configuration for a specified MPCC.
         *
         * Parameters:
-        * [in/out] mpc         - MPC context.
-        * [in]     blnd_cfg    - MPCC blending configuration.
-        * [in]     mpcc_id     - The MPCC physical instance.
         *
-        * Return:  void
+        * - [in/out] mpc - MPC context.
+        * - [in] blnd_cfg - MPCC blending configuration.
+        * - [in] mpcc_id  - The MPCC physical instance.
+        *
+        * Return:
+        *
+        * void
         */
        void (*update_blending)(
                struct mpc *mpc,
@@ -289,15 +380,18 @@ struct mpc_funcs {
        /**
         * @cursor_lock:
         *
-        * Lock cursor updates for the specified OPP.
-        * OPP defines the set of MPCC that are locked together for cursor.
+        * Lock cursor updates for the specified OPP. OPP defines the set of
+        * MPCC that are locked together for cursor.
         *
         * Parameters:
-        * [in]         mpc             - MPC context.
-        * [in]     opp_id      - The OPP to lock cursor updates on
-        * [in]         lock    - lock/unlock the OPP
         *
-        * Return:  void
+        * - [in] mpc - MPC context.
+        * - [in] opp_id  - The OPP to lock cursor updates on
+        * - [in] lock - lock/unlock the OPP
+        *
+        * Return:
+        *
+        * void
         */
        void (*cursor_lock)(
                        struct mpc *mpc,
@@ -307,20 +401,25 @@ struct mpc_funcs {
        /**
         * @insert_plane_to_secondary:
         *
-        * Add DPP into secondary MPC tree based on specified blending position.
-        * Only used for planes that are part of blending chain for DWB output
+        * Add DPP into secondary MPC tree based on specified blending
+        * position. Only used for planes that are part of the blending chain
+        * for DWB output.
         *
         * Parameters:
-        * [in/out] mpc         - MPC context.
-        * [in/out] tree                - MPC tree structure that plane will be added to.
-        * [in] blnd_cfg        - MPCC blending configuration for the new blending layer.
-        * [in] sm_cfg          - MPCC stereo mix configuration for the new blending layer.
-        *                        stereo mix must disable for the very bottom layer of the tree config.
-        * [in] insert_above_mpcc - Insert new plane above this MPCC.  If NULL, insert as bottom plane.
-        * [in] dpp_id          - DPP instance for the plane to be added.
-        * [in] mpcc_id         - The MPCC physical instance to use for blending.
-        *
-        * Return:  struct mpcc* - MPCC that was added.
+        *
+        * - [in/out] mpc  - MPC context.
+        * - [in/out] tree - MPC tree structure that plane will be added to.
+        * - [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+        * - [in] sm_cfg   - MPCC stereo mix configuration for the new blending layer.
+        *          stereo mix must be disabled for the very bottom layer of the tree config.
+        * - [in] insert_above_mpcc - Insert new plane above this MPCC.  If
+        *          NULL, insert as bottom plane.
+        * - [in] dpp_id - DPP instance for the plane to be added.
+        * - [in] mpcc_id - The MPCC physical instance to use for blending.
+        *
+        * Return:
+        *
+        * struct mpcc* - MPCC that was added.
         */
        struct mpcc* (*insert_plane_to_secondary)(
                        struct mpc *mpc,
@@ -337,10 +436,14 @@ struct mpc_funcs {
         * Remove a specified DPP from the 'secondary' MPC tree.
         *
         * Parameters:
-        * [in/out] mpc         - MPC context.
-        * [in/out] tree        - MPC tree structure that plane will be removed from.
-        * [in]     mpcc        - MPCC to be removed from tree.
-        * Return:  void
+        *
+        * - [in/out] mpc  - MPC context.
+        * - [in/out] tree - MPC tree structure that plane will be removed from.
+        * - [in]     mpcc - MPCC to be removed from tree.
+        *
+        * Return:
+        *
+        * void
         */
        void (*remove_mpcc_from_secondary)(
                        struct mpc *mpc,
index 7617fabbd16ee64a82a26e1a1664b1f428f0bbdb..aee5372e292c5a691c9b6f2d8a45ef31e823889d 100644 (file)
  *
  */
 
+/**
+ * DOC: overview
+ *
+ * The Output Plane Processor (OPP) block groups the functions that format
+ * pixel streams so that they are suitable for display at the display device.
+ * The key functions contained in the OPP are:
+ *
+ * - Adaptive Backlight Modulation (ABM)
+ * - Formatter (FMT), which provides pixel-by-pixel operations to format the
+ *   incoming pixel stream.
+ * - Output Buffer, which provides pixel replication and overlapping.
+ * - Interface between MPC and OPTC.
+ * - Clock and reset generation.
+ * - CRC generation.
+ */
+
 #ifndef __DAL_OPP_H__
 #define __DAL_OPP_H__
 
index 9a00a99317b298c0b7e148acebb032c518c6f310..d98d72f35be5bd3eb28f8db759c6f8848bf5fa85 100644 (file)
@@ -182,9 +182,7 @@ struct timing_generator_funcs {
 
        bool (*enable_crtc)(struct timing_generator *tg);
        bool (*disable_crtc)(struct timing_generator *tg);
-#ifdef CONFIG_DRM_AMD_DC_FP
        void (*phantom_crtc_post_enable)(struct timing_generator *tg);
-#endif
        void (*disable_phantom_crtc)(struct timing_generator *tg);
        bool (*immediate_disable_crtc)(struct timing_generator *tg);
        bool (*is_counter_moving)(struct timing_generator *tg);
index c958ef37b78a667b1bb9bfb26827ae3e45053715..b14d52e52fa2f43c2f7fa46891513de0675c8180 100644 (file)
@@ -107,6 +107,10 @@ void resource_build_test_pattern_params(
                struct resource_context *res_ctx,
                struct pipe_ctx *pipe_ctx);
 
+void resource_build_subsampling_params(
+               struct resource_context *res_ctx,
+               struct pipe_ctx *pipe_ctx);
+
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx);
 
 enum dc_status resource_build_scaling_params_for_context(
@@ -427,22 +431,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
 int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe);
 
 /*
- * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
- * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
- * the number of MPC "cuts" for the plane.
+ * Get the number of MPC slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an MPC combine
+ * pipe topology.
  */
-int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
 
 /*
- * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
- * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for ODM combine. otherwise
- * the number of ODM "cuts" for the timing.
+ * Get the number of ODM slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an ODM combine
+ * pipe topology.
  */
-int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
 
 /* Get the ODM slice index counting from 0 from left most slice */
 int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
index f4633d3cf9b98ecf77e4572be64b397d6c311881..a1f72fe378ee4824dd940f2c88565eda1330237f 100644 (file)
  * Authors: AMD
  *
  */
+
+/**
+ * DOC: overview
+ *
+ * Display Input Output (DIO) is the display input and output unit in DCN. It
+ * includes output encoders to support different display outputs, such as
+ * DisplayPort, HDMI and DVI interfaces. It also includes the control
+ * and status channels for these interfaces.
+ */
+
 #ifndef __LINK_HWSS_DIO_H__
 #define __LINK_HWSS_DIO_H__
 
index 24153b0df503dd77a672526ac911947ac272e789..b8c4a04dd175789e92a4076ba5eb82a6aae573c4 100644 (file)
@@ -41,6 +41,7 @@
 #include "protocols/link_dp_dpia.h"
 #include "protocols/link_dp_phy.h"
 #include "protocols/link_dp_training.h"
+#include "protocols/link_dp_dpia_bw.h"
 #include "accessories/link_dp_trace.h"
 
 #include "link_enc_cfg.h"
@@ -991,6 +992,23 @@ static bool detect_link_and_local_sink(struct dc_link *link,
                        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
                                        link->reported_link_cap.link_rate > LINK_RATE_HIGH3)
                                link->reported_link_cap.link_rate = LINK_RATE_HIGH3;
+
+                       /*
+                        * If this is a DP over USB4 link then we need to:
+                        * - Enable BW ALLOC support on DPtx if applicable
+                        */
+                       if (dc->config.usb4_bw_alloc_support) {
+                               if (link_dp_dpia_set_dptx_usb4_bw_alloc_support(link)) {
+                                       /* update with non reduced link cap if bw allocation mode is supported */
+                                       if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+                                               link->dpia_bw_alloc_config.nrd_max_lane_count) {
+                                               link->reported_link_cap.link_rate =
+                                                       link->dpia_bw_alloc_config.nrd_max_link_rate;
+                                               link->reported_link_cap.lane_count =
+                                                       link->dpia_bw_alloc_config.nrd_max_lane_count;
+                                       }
+                               }
+                       }
                        break;
                }
 
index 3cbfbf8d107e9b62c639ef1618041b8fc09dd9b5..a72de44a5747a446e655536747c40135df2a686a 100644 (file)
@@ -2197,6 +2197,64 @@ static enum dc_status enable_link(
 
 static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, int bw)
 {
+       struct dc_link *link = stream->sink->link;
+       int req_bw = bw;
+
+       DC_LOGGER_INIT(link->ctx->logger);
+
+       if (!link->dpia_bw_alloc_config.bw_alloc_enabled)
+               return false;
+
+       if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+               int sink_index = 0;
+               int i = 0;
+
+               for (i = 0; i < link->sink_count; i++) {
+                       if (link->remote_sinks[i] == NULL)
+                               continue;
+
+                       if (stream->sink->sink_id != link->remote_sinks[i]->sink_id)
+                               req_bw += link->dpia_bw_alloc_config.remote_sink_req_bw[i];
+                       else
+                               sink_index = i;
+               }
+
+               link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw;
+       }
+
+       /* get dp overhead for dp tunneling */
+       link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link);
+       req_bw += link->dpia_bw_alloc_config.dp_overhead;
+
+       if (link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw)) {
+               if (req_bw <= link->dpia_bw_alloc_config.allocated_bw) {
+                       DC_LOG_DEBUG("%s, Success in allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+                                       __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+                                       link->dpia_bw_alloc_config.dp_overhead);
+               } else {
+                       // Cannot get the required bandwidth.
+                       DC_LOG_ERROR("%s, Failed to allocate bw for link(%d), allocated_bw(%d), dp_overhead(%d)\n",
+                                       __func__, link->link_index, link->dpia_bw_alloc_config.allocated_bw,
+                                       link->dpia_bw_alloc_config.dp_overhead);
+                       return false;
+               }
+       } else {
+               DC_LOG_DEBUG("%s, usb4 request bw timeout\n", __func__);
+               return false;
+       }
+
+       if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+               int i = 0;
+
+               for (i = 0; i < link->sink_count; i++) {
+                       if (link->remote_sinks[i] == NULL)
+                               continue;
+                       DC_LOG_DEBUG("%s, remote_sink=%s, request_bw=%d\n", __func__,
+                                       (const char *)(&link->remote_sinks[i]->edid_caps.display_name[0]),
+                                       link->dpia_bw_alloc_config.remote_sink_req_bw[i]);
+               }
+       }
+
        return true;
 }
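
An illustrative request calculation for the MST path above (all numbers invented for the example): two remote sinks, where sink 0 previously recorded 3000, the new stream on sink 1 needs 5000, and tunneling overhead is 500:

	/*
	 * req_bw  = 5000    new stream bw (this sink)
	 *         + 3000    other sink's remote_sink_req_bw
	 *         +  500    link_dp_dpia_get_dp_overhead_in_dp_tunneling()
	 *         = 8500    passed to link_dp_dpia_allocate_usb4_bandwidth_for_stream()
	 */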
 
index 8fe66c3678508d9aee6779fa25cd6128e1f30832..1c038e2a527b36e9a0f00bdb703290eae431753a 100644 (file)
@@ -125,11 +125,9 @@ static bool dp_active_dongle_validate_timing(
                if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
                        struct dc_crtc_timing outputTiming = *timing;
 
-#if defined(CONFIG_DRM_AMD_DC_FP)
                        if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
                                /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
                                outputTiming.flags.DSC = 0;
-#endif
                        if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) >
                                        dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
                                return false;
index 5a0b0451895690d184ec00c56873f0d1acad6864..e06d3c2d891023f323b4136306bf0d8c0ee4f76f 100644 (file)
@@ -1505,10 +1505,7 @@ enum link_training_result dp_perform_link_training(
         * Non-LT AUX transactions inside training mode.
         */
        if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
-               if (link->dc->config.use_old_fixed_vs_sequence)
-                       status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings);
-               else
-                       status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+               status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
        else if (encoding == DP_8b_10b_ENCODING)
                status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
        else if (encoding == DP_128b_132b_ENCODING)
index 7087cdc9e9775c4ad953d3db6eafba6ea12e72b0..b5cf75975fffd64774aa9ee8ed64ba44e50deb6e 100644 (file)
@@ -186,356 +186,6 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
        return status;
 }
 
-
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       struct link_training_settings *lt_settings)
-{
-       const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
-       const uint8_t offset = dp_parse_lttpr_repeater_count(
-                       link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-       const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
-       const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
-       uint32_t pre_disable_intercept_delay_ms = 0;
-       uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
-       uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
-       const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19};
-       const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01};
-       const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
-       const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
-       const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
-       const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
-       enum link_training_result status = LINK_TRAINING_SUCCESS;
-       uint8_t lane = 0;
-       union down_spread_ctrl downspread = {0};
-       union lane_count_set lane_count_set = {0};
-       uint8_t toggle_rate;
-       uint8_t rate;
-
-       /* Only 8b/10b is supported */
-       ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
-                       DP_8b_10b_ENCODING);
-
-       if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-               status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
-               return status;
-       }
-
-       if (offset != 0xFF) {
-               if (offset == 2) {
-                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
-
-               /* Certain display and cable configuration require extra delay */
-               } else if (offset > 2) {
-                       pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
-               }
-       }
-
-       /* Vendor specific: Reset lane settings */
-       link_configure_fixed_vs_pe_retimer(link->ddc,
-                       &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset));
-       link_configure_fixed_vs_pe_retimer(link->ddc,
-                       &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
-       link_configure_fixed_vs_pe_retimer(link->ddc,
-                       &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
-       /* Vendor specific: Enable intercept */
-       link_configure_fixed_vs_pe_retimer(link->ddc,
-                       &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en));
-
-
-       /* 1. set link rate, lane count and spread. */
-
-       downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
-
-       lane_count_set.bits.LANE_COUNT_SET =
-       lt_settings->link_settings.lane_count;
-
-       lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
-       lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
-       if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
-               lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
-                               link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
-       }
-
-       core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
-               &downspread.raw, sizeof(downspread));
-
-       core_link_write_dpcd(link, DP_LANE_COUNT_SET,
-               &lane_count_set.raw, 1);
-
-       rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
-       /* Vendor specific: Toggle link rate */
-       toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
-       if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
-               core_link_write_dpcd(
-                               link,
-                               DP_LINK_BW_SET,
-                               &toggle_rate,
-                               1);
-       }
-
-       link->vendor_specific_lttpr_link_rate_wa = rate;
-
-       core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-
-       DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
-               __func__,
-               DP_LINK_BW_SET,
-               lt_settings->link_settings.link_rate,
-               DP_LANE_COUNT_SET,
-               lt_settings->link_settings.lane_count,
-               lt_settings->enhanced_framing,
-               DP_DOWNSPREAD_CTRL,
-               lt_settings->link_settings.link_spread);
-
-       link_configure_fixed_vs_pe_retimer(link->ddc,
-                       &vendor_lttpr_write_data_dpmf[0],
-                       sizeof(vendor_lttpr_write_data_dpmf));
-
-       if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
-               link_configure_fixed_vs_pe_retimer(link->ddc,
-                               &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
-               link_configure_fixed_vs_pe_retimer(link->ddc,
-                               &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2));
-               link_configure_fixed_vs_pe_retimer(link->ddc,
-                               &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3));
-               link_configure_fixed_vs_pe_retimer(link->ddc,
-                               &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4));
-               link_configure_fixed_vs_pe_retimer(link->ddc,
-                               &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5));
-       }
-
-       /* 2. Perform link training */
-
-       /* Perform Clock Recovery Sequence */
-       if (status == LINK_TRAINING_SUCCESS) {
-               const uint8_t max_vendor_dpcd_retries = 10;
-               uint32_t retries_cr;
-               uint32_t retry_count;
-               uint32_t wait_time_microsec;
-               enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
-               union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
-               union lane_align_status_updated dpcd_lane_status_updated;
-               union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-               uint8_t i = 0;
-
-               retries_cr = 0;
-               retry_count = 0;
-
-               memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
-               memset(&dpcd_lane_status_updated, '\0',
-               sizeof(dpcd_lane_status_updated));
-
-               while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
-                       (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
-                       /* 1. call HWSS to set lane settings */
-                       dp_set_hw_lane_settings(
-                                       link,
-                                       link_res,
-                                       lt_settings,
-                                       0);
-
-                       /* 2. update DPCD of the receiver */
-                       if (!retry_count) {
-                               /* EPR #361076 - write as a 5-byte burst,
-                                * but only for the 1-st iteration.
-                                */
-                               dpcd_set_lt_pattern_and_lane_settings(
-                                               link,
-                                               lt_settings,
-                                               lt_settings->pattern_for_cr,
-                                               0);
-                               /* Vendor specific: Disable intercept */
-                               for (i = 0; i < max_vendor_dpcd_retries; i++) {
-                                       if (pre_disable_intercept_delay_ms != 0)
-                                               msleep(pre_disable_intercept_delay_ms);
-                                       if (link_configure_fixed_vs_pe_retimer(link->ddc,
-                                                       &vendor_lttpr_write_data_intercept_dis[0],
-                                                       sizeof(vendor_lttpr_write_data_intercept_dis)))
-                                               break;
-
-                                       link_configure_fixed_vs_pe_retimer(link->ddc,
-                                                       &vendor_lttpr_write_data_intercept_en[0],
-                                                       sizeof(vendor_lttpr_write_data_intercept_en));
-                               }
-                       } else {
-                               vendor_lttpr_write_data_vs[3] = 0;
-                               vendor_lttpr_write_data_pe[3] = 0;
-
-                               for (lane = 0; lane < lane_count; lane++) {
-                                       vendor_lttpr_write_data_vs[3] |=
-                                                       lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
-                                       vendor_lttpr_write_data_pe[3] |=
-                                                       lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
-                               }
-
-                               /* Vendor specific: Update VS and PE to DPRX requested value */
-                               link_configure_fixed_vs_pe_retimer(link->ddc,
-                                               &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
-                               link_configure_fixed_vs_pe_retimer(link->ddc,
-                                               &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
-                               dpcd_set_lane_settings(
-                                               link,
-                                               lt_settings,
-                                               0);
-                       }
-
-                       /* 3. wait receiver to lock-on*/
-                       wait_time_microsec = lt_settings->cr_pattern_time;
-
-                       dp_wait_for_training_aux_rd_interval(
-                                       link,
-                                       wait_time_microsec);
-
-                       /* 4. Read lane status and requested drive
-                        * settings as set by the sink
-                        */
-                       dp_get_lane_status_and_lane_adjust(
-                                       link,
-                                       lt_settings,
-                                       dpcd_lane_status,
-                                       &dpcd_lane_status_updated,
-                                       dpcd_lane_adjust,
-                                       0);
-
-                       /* 5. check CR done*/
-                       if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
-                               status = LINK_TRAINING_SUCCESS;
-                               break;
-                       }
-
-                       /* 6. max VS reached*/
-                       if (dp_is_max_vs_reached(lt_settings))
-                               break;
-
-                       /* 7. same lane settings */
-                       /* Note: settings are the same for all lanes,
-                        * so comparing first lane is sufficient
-                        */
-                       if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
-                                       dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
-                               retries_cr++;
-                       else
-                               retries_cr = 0;
-
-                       /* 8. update VS/PE/PC2 in lt_settings*/
-                       dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
-                                       lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-                       retry_count++;
-               }
-
-               if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
-                       ASSERT(0);
-                       DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
-                               __func__,
-                               LINK_TRAINING_MAX_CR_RETRY);
-
-               }
-
-               status = dp_get_cr_failure(lane_count, dpcd_lane_status);
-       }
-
-       /* Perform Channel EQ Sequence */
-       if (status == LINK_TRAINING_SUCCESS) {
-               enum dc_dp_training_pattern tr_pattern;
-               uint32_t retries_ch_eq;
-               uint32_t wait_time_microsec;
-               enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
-               union lane_align_status_updated dpcd_lane_status_updated = {0};
-               union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
-               union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
-               /* Note: also check that TPS4 is a supported feature*/
-               tr_pattern = lt_settings->pattern_for_eq;
-
-               dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
-
-               status = LINK_TRAINING_EQ_FAIL_EQ;
-
-               for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
-                       retries_ch_eq++) {
-
-                       dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
-
-                       vendor_lttpr_write_data_vs[3] = 0;
-                       vendor_lttpr_write_data_pe[3] = 0;
-
-                       for (lane = 0; lane < lane_count; lane++) {
-                               vendor_lttpr_write_data_vs[3] |=
-                                               lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
-                               vendor_lttpr_write_data_pe[3] |=
-                                               lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
-                       }
-
-                       /* Vendor specific: Update VS and PE to DPRX requested value */
-                       link_configure_fixed_vs_pe_retimer(link->ddc,
-                                       &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs));
-                       link_configure_fixed_vs_pe_retimer(link->ddc,
-                                       &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe));
-
-                       /* 2. update DPCD*/
-                       if (!retries_ch_eq)
-                               /* EPR #361076 - write as a 5-byte burst,
-                                * but only for the 1-st iteration
-                                */
-
-                               dpcd_set_lt_pattern_and_lane_settings(
-                                       link,
-                                       lt_settings,
-                                       tr_pattern, 0);
-                       else
-                               dpcd_set_lane_settings(link, lt_settings, 0);
-
-                       /* 3. wait for receiver to lock-on*/
-                       wait_time_microsec = lt_settings->eq_pattern_time;
-
-                       dp_wait_for_training_aux_rd_interval(
-                                       link,
-                                       wait_time_microsec);
-
-                       /* 4. Read lane status and requested
-                        * drive settings as set by the sink
-                        */
-                       dp_get_lane_status_and_lane_adjust(
-                               link,
-                               lt_settings,
-                               dpcd_lane_status,
-                               &dpcd_lane_status_updated,
-                               dpcd_lane_adjust,
-                               0);
-
-                       /* 5. check CR done*/
-                       if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
-                               status = LINK_TRAINING_EQ_FAIL_CR;
-                               break;
-                       }
-
-                       /* 6. check CHEQ done*/
-                       if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
-                                       dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
-                                       dp_is_interlane_aligned(dpcd_lane_status_updated)) {
-                               status = LINK_TRAINING_SUCCESS;
-                               break;
-                       }
-
-                       /* 7. update VS/PE/PC2 in lt_settings*/
-                       dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
-                                       lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-               }
-       }
-
-       return status;
-}
-
 enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
        struct dc_link *link,
        const struct link_resource *link_res,
@@ -620,18 +270,20 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
 
        rate = get_dpcd_link_rate(&lt_settings->link_settings);
 
-       /* Vendor specific: Toggle link rate */
-       toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+       if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) {
+               /* Vendor specific: Toggle link rate */
+               toggle_rate = (rate == 0x6) ? 0xA : 0x6;
 
-       if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
-               core_link_write_dpcd(
-                               link,
-                               DP_LINK_BW_SET,
-                               &toggle_rate,
-                               1);
-       }
+               if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
+                       core_link_write_dpcd(
+                                       link,
+                                       DP_LINK_BW_SET,
+                                       &toggle_rate,
+                                       1);
+               }
 
-       link->vendor_specific_lttpr_link_rate_wa = rate;
+               link->vendor_specific_lttpr_link_rate_wa = rate;
+       }
 
        core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
 
index c0d6ea329504ffeea503cf5a72193b05375040bc..e61970e27661d97c8dcec49c41e3d1e5d2a13223 100644 (file)
 #define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
 #include "link_dp_training.h"
 
-enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       struct link_training_settings *lt_settings);
-
 enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
        struct dc_link *link,
        const struct link_resource *link_res,
index 046d3e205415311cd63a98aa3c0e59c8aaea2e89..443215b963089be5cbadd361599d1f8090aa192b 100644 (file)
@@ -287,7 +287,7 @@ bool set_default_brightness_aux(struct dc_link *link)
        if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
                if (!read_default_bl_aux(link, &default_backlight))
                        default_backlight = 150000;
-               // if < 1 nits or > 5000, it might be wrong readback
+               // if > 5000 nits, it might be a wrong readback. 0 nits is a valid default value for an OLED panel.
                if (default_backlight < 1000 || default_backlight > 5000000)
                        default_backlight = 150000;
 
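For clarity, the bounds in the check above are in millinits: as written, a readback below 1 nit (1000) or above 5000 nits (5,000,000) is replaced with the 150-nit default. A minimal sketch; the helper name is ours.

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_BACKLIGHT_MILLINITS 150000	/* 150 nits */

/* Mirror of the bounds check above: readbacks outside [1 nit, 5000 nits]
 * are treated as AUX garbage and replaced with the default. */
static uint32_t sanitize_default_backlight(uint32_t millinits)
{
	if (millinits < 1000 || millinits > 5000000)
		return DEFAULT_BACKLIGHT_MILLINITS;
	return millinits;
}

int main(void)
{
	printf("%u\n", sanitize_default_backlight(7000000));	/* 150000 */
	return 0;
}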
index 37a64186f3241a456c4eeea6038cc311fca0de43..ecc477ef8e3b78e1705b8546ac17e1d8490982f7 100644 (file)
@@ -2169,6 +2169,17 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
                                        optimal_uclk_for_dcfclk_sta_targets[i] =
                                                        bw_params->clk_table.entries[j].memclk_mhz * 16;
                                        break;
+                               } else {
+                                       /* condition where (dcfclk_sta_targets[i] >= optimal_dcfclk_for_uclk[j]):
+                                        * If the memory bandwidth is low enough that every optimal DCFCLK
+                                        * for the UCLK states is below the smallest DCFCLK STA target,
+                                        * populate the optimal UCLK for each DCFCLK STA target with the
+                                        * max UCLK.
+                                        */
+                                       if (j == num_uclk_states - 1) {
+                                               optimal_uclk_for_dcfclk_sta_targets[i] =
+                                                               bw_params->clk_table.entries[j].memclk_mhz * 16;
+                                       }
                                }
                        }
                }
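A compact sketch of the fallback added above: when scanning the UCLK states for the first one whose optimal DCFCLK covers a given STA target, falling off the end of the table now assigns the max UCLK instead of leaving the entry unpopulated. The helper and array shapes are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Return the first memclk whose optimal DCFCLK exceeds the STA target,
 * else fall back to the largest memclk, as in the hunk above. */
static uint32_t optimal_uclk_for_target(uint32_t dcfclk_sta_target,
					const uint32_t *optimal_dcfclk_for_uclk,
					const uint32_t *memclk_mhz,
					int num_uclk_states)
{
	int j;

	for (j = 0; j < num_uclk_states; j++)
		if (dcfclk_sta_target < optimal_dcfclk_for_uclk[j])
			return memclk_mhz[j] * 16;

	/* Every optimal DCFCLK was below the target: use the max UCLK */
	return memclk_mhz[num_uclk_states - 1] * 16;
}

int main(void)
{
	const uint32_t opt_dcfclk[] = { 400, 500, 600 };
	const uint32_t memclk[] = { 800, 1000, 1200 };

	/* 700 exceeds every entry, so the max UCLK (1200 * 16) is used */
	printf("%u\n", optimal_uclk_for_target(700, opt_dcfclk, memclk, 3));
	return 0;
}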
index 511ff6b5b9856776ea834393e4a7bfcaa90ca49f..7538b548c5725177b12e2d169acc681c31174797 100644 (file)
@@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
        vpg = dcn301_vpg_create(ctx, vpg_inst);
        afmt = dcn301_afmt_create(ctx, afmt_inst);
 
-       if (!enc1 || !vpg || !afmt) {
+       if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
                kfree(enc1);
                kfree(vpg);
                kfree(afmt);
index 31035fc3d8686359e00c694519429c000165e844..04d142f974745e3e4dc327eb0b7f8c7875839501 100644 (file)
@@ -1941,8 +1941,6 @@ static bool dcn31_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
-       dc->config.use_old_fixed_vs_sequence = true;
-
        /* Use pipe context based otg sync logic */
        dc->config.use_pipe_ctx_sync_logic = true;
 
index c4d71e7f18af47ba47dbc89e1a9098a0a4eade04..6f10052caeef02c3448307c4c81aef805e68e95b 100644 (file)
@@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context(
                dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
                DC_FP_END();
                pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
-               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+               if (dc->config.enable_windowed_mpo_odm &&
+                               dc->debug.enable_single_display_2to1_odm_policy) {
+                       switch (resource_get_odm_slice_count(pipe)) {
+                       case 2:
+                               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+                               break;
+                       case 4:
+                               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+                               break;
+                       default:
+                               pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+                       }
+               } else {
+                       pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+               }
                pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
                pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
                pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
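A minimal sketch of the new selection: with windowed MPO+ODM enabled and the single-display 2-to-1 debug policy set, the DML combine policy follows the current ODM slice count, and any slice count other than 2 or 4 falls back to the DAL-decided policy. The enum below stands in for dm_odm_combine_policy_*.

#include <stdio.h>

enum odm_policy { POLICY_DAL, POLICY_2TO1, POLICY_4TO1 };

static enum odm_policy pick_odm_policy(int slice_count,
				       int windowed_mpo_odm,
				       int single_display_2to1)
{
	if (windowed_mpo_odm && single_display_2to1) {
		switch (slice_count) {
		case 2:
			return POLICY_2TO1;
		case 4:
			return POLICY_4TO1;
		}
	}
	return POLICY_DAL;	/* default: let DAL decide the policy */
}

int main(void)
{
	printf("%d\n", pick_odm_policy(4, 1, 1));	/* 2 == POLICY_4TO1 */
	return 0;
}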
index 74412e5f03fefbaa9350982ac92bc528cc8e80e8..6f832bf278cff540d548d17bb674a62614421361 100644 (file)
@@ -1760,6 +1760,7 @@ static bool dcn321_resource_construct(
        dc->caps.color.mpc.ocsc = 1;
 
        dc->config.dc_mode_clk_limit_support = true;
+       dc->config.enable_windowed_mpo_odm = false;
        /* read VBIOS LTTPR caps */
        {
                if (ctx->dc_bios->funcs->get_lttpr_caps) {
index 761ec989187568730fdd8cd51cd1802fa657be9c..e534e87cc85b5e20290e6d14d337872c22062224 100644 (file)
@@ -701,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {
 
        // 6:1 downscaling ratio: 1000/6 = 166.666
        .max_downscale_factor = {
-                       .argb8888 = 167,
+                       .argb8888 = 250,
                        .nv12 = 167,
                        .fp16 = 167
        },
@@ -764,6 +764,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        },
        .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
        .enable_z9_disable_interface = true, /* Allow support for the PMFW interface to disable Z9 */
+       .minimum_z8_residency_time = 2100,
        .using_dml2 = true,
        .support_eDP1_5 = true,
        .enable_hpo_pg_support = false,
@@ -780,8 +781,9 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_z10 = false,
        .ignore_pg = true,
        .psp_disabled_wa = true,
-       .ips2_eval_delay_us = 200,
-       .ips2_entry_delay_us = 400,
+       .ips2_eval_delay_us = 1650,
+       .ips2_entry_delay_us = 800,
+       .disable_dmub_reallow_idle = true,
        .static_screen_wait_frames = 2,
 };
 
@@ -2130,6 +2132,7 @@ static bool dcn35_resource_construct(
        dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
        dc->dml2_options.use_native_pstate_optimization = true;
        dc->dml2_options.use_native_soc_bb_construction = true;
+       dc->dml2_options.minimize_dispclk_using_odm = false;
        if (dc->config.EnableMinDispClkODM)
                dc->dml2_options.minimize_dispclk_using_odm = true;
        dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
index c78c9224ab6060493a454683423c9d2a3b27e9a1..ae30fe2b6d0d90a6bcbb5bcea0d1f71d1aa8517a 100644 (file)
@@ -78,6 +78,12 @@ struct dmub_srv_dcn31_regs;
 
 struct dmcub_trace_buf_entry;
 
+/* enum dmub_window_memory_type - memory location type specification for windows */
+enum dmub_window_memory_type {
+       DMUB_WINDOW_MEMORY_TYPE_FB = 0,
+       DMUB_WINDOW_MEMORY_TYPE_GART
+};
+
 /* enum dmub_status - return code for dmcub functions */
 enum dmub_status {
        DMUB_STATUS_OK = 0,
@@ -203,7 +209,7 @@ struct dmub_srv_region_params {
        uint32_t vbios_size;
        const uint8_t *fw_inst_const;
        const uint8_t *fw_bss_data;
-       bool is_mailbox_in_inbox;
+       const enum dmub_window_memory_type *window_memory_type;
 };
 
 /**
@@ -223,7 +229,7 @@ struct dmub_srv_region_params {
  */
 struct dmub_srv_region_info {
        uint32_t fb_size;
-       uint32_t inbox_size;
+       uint32_t gart_size;
        uint8_t num_regions;
        struct dmub_region regions[DMUB_WINDOW_TOTAL];
 };
@@ -239,9 +245,10 @@ struct dmub_srv_region_info {
 struct dmub_srv_memory_params {
        const struct dmub_srv_region_info *region_info;
        void *cpu_fb_addr;
-       void *cpu_inbox_addr;
+       void *cpu_gart_addr;
        uint64_t gpu_fb_addr;
-       uint64_t gpu_inbox_addr;
+       uint64_t gpu_gart_addr;
+       const enum dmub_window_memory_type *window_memory_type;
 };
 
 /**
@@ -443,7 +450,6 @@ struct dmub_srv_create_params {
        struct dmub_srv_base_funcs funcs;
        struct dmub_srv_hw_funcs *hw_funcs;
        void *user_ctx;
-       struct dc_context *dc_ctx;
        enum dmub_asic asic;
        uint32_t fw_version;
        bool is_virtual;
index e699731ee68e96388c52ed55c17b34cc8710aaab..59b96136871e5a97f0eefe16ef3f6e66d99e4fa4 100644 (file)
 #ifndef DMUB_CMD_H
 #define DMUB_CMD_H
 
-#if defined(_TEST_HARNESS) || defined(FPGA_USB4)
-#include "dmub_fw_types.h"
-#include "include_legacy/atomfirmware.h"
-
-#if defined(_TEST_HARNESS)
-#include <string.h>
-#endif
-#else
-
 #include <asm/byteorder.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -42,8 +33,6 @@
 
 #include "atomfirmware.h"
 
-#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4)
-
 //<DMUB_TYPES>==================================================================
 /* Basic type definitions. */
 
@@ -403,15 +392,16 @@ union replay_debug_flags {
 
                /**
                 * 0x400 (bit 10)
-                * @force_disable_ips1: Force disable IPS1 state
+                * @enable_ips_visual_confirm: Enable IPS visual confirm when entering IPS
+                * If IPS2 is entered, the visual confirm bar will change to yellow.
                 */
-               uint32_t force_disable_ips1 : 1;
+               uint32_t enable_ips_visual_confirm : 1;
 
                /**
                 * 0x800 (bit 11)
-                * @force_disable_ips2: Force disable IPS2 state
+                * @enable_ips_residency_profiling: Enable IPS residency profiling
                 */
-               uint32_t force_disable_ips2 : 1;
+               uint32_t enable_ips_residency_profiling : 1;
 
                uint32_t reserved : 20;
        } bitfields;
@@ -1270,11 +1260,11 @@ struct dmub_cmd_PLAT_54186_wa {
        uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; /**< reg value */
        uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; /**< reg value */
        struct {
-               uint8_t hubp_inst : 4; /**< HUBP instance */
-               uint8_t tmz_surface : 1; /**< TMZ enable or disable */
-               uint8_t immediate :1; /**< Immediate flip */
-               uint8_t vmid : 4; /**< VMID */
-               uint8_t grph_stereo : 1; /**< 1 if stereo */
+               uint32_t hubp_inst : 4; /**< HUBP instance */
+               uint32_t tmz_surface : 1; /**< TMZ enable or disable */
+               uint32_t immediate :1; /**< Immediate flip */
+               uint32_t immediate : 1; /**< Immediate flip */
+               uint32_t grph_stereo : 1; /**< 1 if stereo */
                uint32_t reserved : 21; /**< Reserved */
        } flip_params; /**< Pageflip parameters */
        uint32_t reserved[9]; /**< Reserved bits */
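The hunk above widens the flip_params bit-fields from uint8_t to uint32_t so that every member, including the 21-bit reserved field, shares one 32-bit storage unit; how mixed-width bit-fields pack is implementation-defined. A standalone illustration, with struct names that are ours rather than the driver's:

#include <stdint.h>
#include <stdio.h>

struct mixed {		/* old style: unit merging is compiler-specific */
	uint8_t  a : 4;
	uint8_t  b : 1;
	uint8_t  c : 1;
	uint8_t  d : 4;
	uint8_t  e : 1;
	uint32_t reserved : 21;
};

struct uniform {	/* new style: one 32-bit unit, stable layout */
	uint32_t a : 4;
	uint32_t b : 1;
	uint32_t c : 1;
	uint32_t d : 4;
	uint32_t e : 1;
	uint32_t reserved : 21;
};

int main(void)
{
	/* GCC happens to pack both into 4 bytes, but compilers such as
	 * MSVC start a new storage unit whenever the declared type
	 * changes, growing the mixed form; the uniform form is always
	 * exactly one uint32_t of fields. */
	printf("mixed=%zu uniform=%zu\n",
	       sizeof(struct mixed), sizeof(struct uniform));
	return 0;
}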
index 2daa1e0c806165a382717705cf08c3ad99d5e8ba..305463b8f110be1742476528bf2ee64439c02a58 100644 (file)
@@ -32,8 +32,6 @@
 #include "dcn/dcn_3_2_0_offset.h"
 #include "dcn/dcn_3_2_0_sh_mask.h"
 
-#define DCN_BASE__INST0_SEG2                       0x000034C0
-
 #define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
 #define CTX dmub
 #define REGS dmub->regs_dcn32
index 9ad738805320deeba210f6c103459617e553768f..569c2a27a042b1a983bbae2513ebd65d312e114c 100644 (file)
@@ -417,58 +417,44 @@ void dmub_srv_destroy(struct dmub_srv *dmub)
        dmub_memset(dmub, 0, sizeof(*dmub));
 }
 
+static uint32_t dmub_srv_calc_regions_for_memory_type(const struct dmub_srv_region_params *params,
+       struct dmub_srv_region_info *out,
+       const uint32_t *window_sizes,
+       enum dmub_window_memory_type memory_type)
+{
+       uint32_t i, top = 0;
+
+       for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+               if (params->window_memory_type[i] == memory_type) {
+                       struct dmub_region *region = &out->regions[i];
+
+                       region->base = dmub_align(top, 256);
+                       region->top = region->base + dmub_align(window_sizes[i], 64);
+                       top = region->top;
+               }
+       }
+
+       return dmub_align(top, 4096);
+}
+
 enum dmub_status
-dmub_srv_calc_region_info(struct dmub_srv *dmub,
-                         const struct dmub_srv_region_params *params,
-                         struct dmub_srv_region_info *out)
+       dmub_srv_calc_region_info(struct dmub_srv *dmub,
+               const struct dmub_srv_region_params *params,
+               struct dmub_srv_region_info *out)
 {
-       struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
-       struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
-       struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
-       struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
-       struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
-       struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
-       struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
-       struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
        const struct dmub_fw_meta_info *fw_info;
        uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
        uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
-       uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
-       uint32_t previous_top = 0;
+       uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };
+
        if (!dmub->sw_init)
                return DMUB_STATUS_INVALID;
 
        memset(out, 0, sizeof(*out));
+       memset(window_sizes, 0, sizeof(window_sizes));
 
        out->num_regions = DMUB_NUM_WINDOWS;
 
-       inst->base = 0x0;
-       inst->top = inst->base + params->inst_const_size;
-
-       data->base = dmub_align(inst->top, 256);
-       data->top = data->base + params->bss_data_size;
-
-       /*
-        * All cache windows below should be aligned to the size
-        * of the DMCUB cache line, 64 bytes.
-        */
-
-       stack->base = dmub_align(data->top, 256);
-       stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
-
-       bios->base = dmub_align(stack->top, 256);
-       bios->top = bios->base + params->vbios_size;
-
-       if (params->is_mailbox_in_inbox) {
-               mail->base = 0;
-               mail->top = mail->base + DMUB_MAILBOX_SIZE;
-               previous_top = bios->top;
-       } else {
-               mail->base = dmub_align(bios->top, 256);
-               mail->top = mail->base + DMUB_MAILBOX_SIZE;
-               previous_top = mail->top;
-       }
-
        fw_info = dmub_get_fw_meta_info(params);
 
        if (fw_info) {
@@ -486,19 +472,20 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
                        dmub->fw_version = fw_info->fw_version;
        }
 
-       trace_buff->base = dmub_align(previous_top, 256);
-       trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
-
-       fw_state->base = dmub_align(trace_buff->top, 256);
-       fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
+       window_sizes[DMUB_WINDOW_0_INST_CONST] = params->inst_const_size;
+       window_sizes[DMUB_WINDOW_1_STACK] = DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;
+       window_sizes[DMUB_WINDOW_2_BSS_DATA] = params->bss_data_size;
+       window_sizes[DMUB_WINDOW_3_VBIOS] = params->vbios_size;
+       window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
+       window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
+       window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
+       window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
 
-       scratch_mem->base = dmub_align(fw_state->top, 256);
-       scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+       out->fb_size =
+               dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
 
-       out->fb_size = dmub_align(scratch_mem->top, 4096);
-
-       if (params->is_mailbox_in_inbox)
-               out->inbox_size = dmub_align(mail->top, 4096);
+       out->gart_size =
+               dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_GART);
 
        return DMUB_STATUS_OK;
 }
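A self-contained sketch of the packing scheme introduced here: each window lands in either the FB or the GART pool according to its memory type, window bases are 256-byte aligned, window sizes are rounded up to the 64-byte DMCUB cache line, and each pool's total is rounded up to a 4 KiB page. Types and sizes are pared down from the DMUB ones.

#include <stdint.h>
#include <stdio.h>

#define NUM_WINDOWS 8

enum mem_type { MEM_FB, MEM_GART };

struct region { uint32_t base, top; };

static uint32_t align_up(uint32_t x, uint32_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Mirror of dmub_srv_calc_regions_for_memory_type: lay out only the
 * windows of one memory type, back to back, and return the pool size. */
static uint32_t pack_pool(const enum mem_type *type, const uint32_t *size,
			  struct region *out, enum mem_type which)
{
	uint32_t i, top = 0;

	for (i = 0; i < NUM_WINDOWS; i++) {
		if (type[i] != which)
			continue;
		out[i].base = align_up(top, 256);		/* window base */
		out[i].top = out[i].base + align_up(size[i], 64); /* cache line */
		top = out[i].top;
	}
	return align_up(top, 4096);	/* pool rounded up to a page */
}

int main(void)
{
	const enum mem_type type[NUM_WINDOWS] = {
		MEM_FB, MEM_FB, MEM_FB, MEM_FB,
		MEM_GART, MEM_GART, MEM_FB, MEM_FB,
	};
	const uint32_t size[NUM_WINDOWS] = {
		0x40000, 0x4000, 0x8000, 0x1000, 0x1000, 0x1000, 0x100, 0x1000,
	};
	struct region r[NUM_WINDOWS] = { { 0, 0 } };

	printf("fb_size=0x%x gart_size=0x%x\n",
	       pack_pool(type, size, r, MEM_FB),
	       pack_pool(type, size, r, MEM_GART));
	return 0;
}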
@@ -507,8 +494,6 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
                                       const struct dmub_srv_memory_params *params,
                                       struct dmub_srv_fb_info *out)
 {
-       uint8_t *cpu_base;
-       uint64_t gpu_base;
        uint32_t i;
 
        if (!dmub->sw_init)
@@ -519,19 +504,16 @@ enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
        if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
                return DMUB_STATUS_INVALID;
 
-       cpu_base = (uint8_t *)params->cpu_fb_addr;
-       gpu_base = params->gpu_fb_addr;
-
        for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
                const struct dmub_region *reg =
                        &params->region_info->regions[i];
 
-               out->fb[i].cpu_addr = cpu_base + reg->base;
-               out->fb[i].gpu_addr = gpu_base + reg->base;
-
-               if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
-                       out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
-                       out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
+               if (params->window_memory_type[i] == DMUB_WINDOW_MEMORY_TYPE_GART) {
+                       out->fb[i].cpu_addr = (uint8_t *)params->cpu_gart_addr + reg->base;
+                       out->fb[i].gpu_addr = params->gpu_gart_addr + reg->base;
+               } else {
+                       out->fb[i].cpu_addr = (uint8_t *)params->cpu_fb_addr + reg->base;
+                       out->fb[i].gpu_addr = params->gpu_fb_addr + reg->base;
                }
 
                out->fb[i].size = reg->top - reg->base;
@@ -809,11 +791,20 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
 
 bool dmub_srv_is_hw_pwr_up(struct dmub_srv *dmub)
 {
+       union dmub_fw_boot_status status;
+
        if (!dmub->hw_funcs.is_hw_powered_up)
                return true;
 
-       return dmub->hw_funcs.is_hw_powered_up(dmub) &&
-               dmub->hw_funcs.is_hw_init(dmub);
+       if (!dmub->hw_funcs.is_hw_powered_up(dmub))
+               return false;
+
+       if (!dmub->hw_funcs.is_hw_init(dmub))
+               return false;
+
+       status = dmub->hw_funcs.get_fw_status(dmub);
+
+       return status.bits.dal_fw && status.bits.mailbox_rdy;
 }
 
 enum dmub_status dmub_srv_wait_for_hw_pwr_up(struct dmub_srv *dmub,
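The predicate above now also requires the dal_fw and mailbox_rdy firmware status bits, not just the power and init checks. A toy sketch of how a caller in the spirit of dmub_srv_wait_for_hw_pwr_up() might poll it; the handle and its behavior are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in: pretend the hardware reports ready after N polls */
struct dmub_srv { uint32_t polls_until_up; };

static bool is_hw_pwr_up(struct dmub_srv *dmub)	/* stand-in predicate */
{
	if (dmub->polls_until_up == 0)
		return true;
	dmub->polls_until_up--;
	return false;
}

/* Poll the combined power/init/firmware-status predicate until it
 * passes or the timeout expires (the driver delays ~1us per pass). */
static bool wait_for_hw_pwr_up(struct dmub_srv *dmub, uint32_t timeout_us)
{
	uint32_t i;

	for (i = 0; i <= timeout_us; i++)
		if (is_hw_pwr_up(dmub))
			return true;
	return false;	/* DMUB_STATUS_TIMEOUT in the driver */
}

int main(void)
{
	struct dmub_srv dmub = { .polls_until_up = 5 };

	printf("%s\n", wait_for_hw_pwr_up(&dmub, 10) ? "up" : "timeout");
	return 0;
}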
index 915a031a43cb286fdb03f2fb2788d0fa9e539b59..e4a26143f14c940f73bb0975bfd6b97d1c3f2d73 100644 (file)
 #define __AUDIO_TYPES_H__
 
 #include "signal_types.h"
+#include "fixed31_32.h"
+#include "dc_dp_types.h"
 
 #define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
 #define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
 #define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
 
+struct audio_dp_link_info {
+       uint32_t link_bandwidth_kbps;
+       uint32_t hblank_min_symbol_width;
+       enum dp_link_encoding encoding;
+       enum dc_link_rate link_rate;
+       enum dc_lane_count lane_count;
+       bool is_mst;
+};
 
 struct audio_crtc_info {
        uint32_t h_total;
@@ -42,7 +52,10 @@ struct audio_crtc_info {
        uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */
        uint32_t refresh_rate;
        enum dc_color_depth color_depth;
+       enum dc_pixel_encoding pixel_encoding;
        bool interlaced;
+       uint32_t dsc_bits_per_pixel;
+       uint32_t dsc_num_slices;
 };
 struct azalia_clock_info {
        uint32_t pixel_clock_in_10khz;
@@ -95,6 +108,8 @@ struct audio_output {
        enum signal_type signal;
        /* video timing */
        struct audio_crtc_info crtc_info;
+       /* DP link info */
+       struct audio_dp_link_info dp_link_info;
        /* PLL for audio */
        struct audio_pll_info pll_info;
 };
index df2c7ffe190f4db36050901dce5af89180646f3b..a89d93154ddbf67d4700cd4a8d3fe8b846155e06 100644 (file)
@@ -244,6 +244,7 @@ enum DC_FEATURE_MASK {
        DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
        DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
        DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+       DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
 };
 
 enum DC_DEBUG_MASK {
index af1c46991429be573c68a5ebe76cda97a26ec94a..7dd876f7df74c5369f5e26c24a32c283dd2aa89d 100644 (file)
 #define MAX_SEGMENT                                         6
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 } __maybe_unused;
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index 222fa8d1326966cefaa04e78274edd0d7332d7ea..a05bf8e4f58d9630fc100d03bb330f23f3b410b3 100644 (file)
 #define regDTBCLK_DTO2_MODULO_BASE_IDX                                                                  2
 #define regDTBCLK_DTO3_MODULO                                                                           0x0022
 #define regDTBCLK_DTO3_MODULO_BASE_IDX                                                                  2
+#define regHDMICHARCLK0_CLOCK_CNTL                                                                      0x004a
+#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX                                                             2
 #define regPHYASYMCLK_CLOCK_CNTL                                                                        0x0052
 #define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX                                                               2
 #define regPHYBSYMCLK_CLOCK_CNTL                                                                        0x0053
 #define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX                                                               2
 #define regPHYFSYMCLK_CLOCK_CNTL                                                                        0x0057
 #define regPHYFSYMCLK_CLOCK_CNTL_BASE_IDX                                                               2
+#define regHDMISTREAMCLK_CNTL                                                                           0x0059
+#define regHDMISTREAMCLK_CNTL_BASE_IDX                                                                  2
 #define regDCCG_GATE_DISABLE_CNTL3                                                                      0x005a
 #define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX                                                             2
 #define regHDMISTREAMCLK0_DTO_PARAM                                                                     0x005b
index 8ddb03a1dc394d4f92ea00d5a82ea24788d9be5b..df84941bbe5be562d0d536fdecf7b111dd2d9d7f 100644 (file)
 //DTBCLK_DTO3_MODULO
 #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT                                                         0x0
 #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK                                                           0xFFFFFFFFL
+//HDMICHARCLK0_CLOCK_CNTL
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT                                                       0x0
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT                                                  0x4
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK                                                         0x00000001L
+#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK                                                    0x00000070L
 //PHYASYMCLK_CLOCK_CNTL
 #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT                                                     0x0
 #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT                                                0x4
 #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL__SHIFT                                                0x4
 #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_EN_MASK                                                       0x00000001L
 #define PHYFSYMCLK_CLOCK_CNTL__PHYFSYMCLK_FORCE_SRC_SEL_MASK                                                  0x00000030L
+//HDMISTREAMCLK_CNTL
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT                                                     0x0
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT                                               0x10
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK                                                       0x00000003L
+#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK                                                 0x00010000L
 //DCCG_GATE_DISABLE_CNTL3
 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT                                           0x0
 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT                                           0x1
index 7cf0a625277b1ee6e6e692f799091b3fa7185e29..33b5d9be06b185f51d0a3d7cf720fcaccff64f6e 100644 (file)
 #define regCM0_CM_DEALPHA_BASE_IDX                                                                      2
 #define regCM0_CM_COEF_FORMAT                                                                           0x0d8c
 #define regCM0_CM_COEF_FORMAT_BASE_IDX                                                                  2
+#define regCM0_CM_TEST_DEBUG_INDEX                                                                      0x0d8d
+#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define regCM0_CM_TEST_DEBUG_DATA                                                                       0x0d8e
+#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 
 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 #define regCM1_CM_DEALPHA_BASE_IDX                                                                      2
 #define regCM1_CM_COEF_FORMAT                                                                           0x0ef7
 #define regCM1_CM_COEF_FORMAT_BASE_IDX                                                                  2
+#define regCM1_CM_TEST_DEBUG_INDEX                                                                      0x0ef8
+#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define regCM1_CM_TEST_DEBUG_DATA                                                                       0x0ef9
+#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 
 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 #define regCM2_CM_DEALPHA_BASE_IDX                                                                      2
 #define regCM2_CM_COEF_FORMAT                                                                           0x1062
 #define regCM2_CM_COEF_FORMAT_BASE_IDX                                                                  2
+#define regCM2_CM_TEST_DEBUG_INDEX                                                                      0x1063
+#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define regCM2_CM_TEST_DEBUG_DATA                                                                       0x1064
+#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 
 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 #define regCM3_CM_DEALPHA_BASE_IDX                                                                      2
 #define regCM3_CM_COEF_FORMAT                                                                           0x11cd
 #define regCM3_CM_COEF_FORMAT_BASE_IDX                                                                  2
+#define regCM3_CM_TEST_DEBUG_INDEX                                                                      0x11ce
+#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX                                                             2
+#define regCM3_CM_TEST_DEBUG_DATA                                                                       0x11cf
+#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX                                                              2
 
 
 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec
 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL                                           0x3035
 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE                                                             0x303a
+#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX                                                    2
 
 
 // addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL                                           0x3091
 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE                                                             0x3096
+#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX                                                    2
 
 
 // addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL                                           0x30ed
 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE                                                             0x30f2
+#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX                                                    2
 
 
 // addressBlock: dce_dc_dsc2_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL                                           0x3149
 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX                                  2
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE                                                             0x314e
+#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX                                                    2
 
 
 // addressBlock: dce_dc_dsc3_dispdec_dsc_dcperfmon_dc_perfmon_dispdec
index fca72e2ec92947d8f792c0e51974723c762fbae6..ff77b71167eb71a0495af2e288acd30af86ce4c2 100644 (file)
 #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK                                                               0x00000001L
 #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK                                                      0x00000010L
 #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK                                                   0x00000100L
+
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT                                                   0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT                                                0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK                                                     0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK                                                  0x00000100L
+
 #define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT                                           0x0
 #define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT                                          0x9
 #define DC_PERFMON10_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT                                            0xc
 #define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT                                                            0x8
 #define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK                                                              0x00000001L
 #define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK                                                              0x00000100L
+
+//DIG0_DIG_BE_CLK_CNTL
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE__SHIFT                                                              0x0
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN__SHIFT                                                            0x4
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET__SHIFT                                                        0x5
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET__SHIFT                                                          0x6
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON__SHIFT                                                 0xb
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON__SHIFT                                            0xc
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON__SHIFT                                            0xd
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_MODE_MASK                                                                0x00000007L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_CLK_EN_MASK                                                              0x00000010L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SOFT_RESET_MASK                                                          0x00000020L
+#define DIG0_DIG_BE_CLK_CNTL__HDCP_SOFT_RESET_MASK                                                            0x00000040L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_CLOCK_ON_MASK                                                   0x00000800L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_HDCP_CLOCK_ON_MASK                                              0x00001000L
+#define DIG0_DIG_BE_CLK_CNTL__DIG_BE_SYMCLK_G_TMDS_CLOCK_ON_MASK                                              0x00002000L
+
 #define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT                                                         0x0
 #define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT                                                                     0x1
 #define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT                                                             0x2
 #define DSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_MASK  0x0003FFFFL
 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT  0x0
 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK  0x0003FFFFL
+
+//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT                                  0x0
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT                                  0x8
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT                                  0x10
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT                                  0x18
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK                                    0x0000001FL
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK                                    0x00001F00L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK                                    0x001F0000L
+#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK                                    0x1F000000L
+
 #define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT                                           0x0
 #define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT                                          0x9
 #define DC_PERFMON17_PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT                                            0xc
 #define DWB_OGAM_LUT_INDEX__DWB_OGAM_LUT_INDEX_MASK                                                           0x000001FFL
 #define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA__SHIFT                                                           0x0
 #define DWB_OGAM_LUT_DATA__DWB_OGAM_LUT_DATA_MASK                                                             0x0003FFFFL
+//DWB_OGAM_LUT_CONTROL
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT                                            0x0
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT                                              0x4
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG__SHIFT                                                    0x8
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT                                                    0xc
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE__SHIFT                                                 0x10
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK_MASK                                              0x00000007L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL_MASK                                                0x00000030L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_DBG_MASK                                                      0x00000100L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL_MASK                                                      0x00001000L
+#define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_CONFIG_MODE_MASK                                                   0x00010000L
+
 #define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_WRITE_COLOR_MASK__SHIFT                                            0x0
 #define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_READ_COLOR_SEL__SHIFT                                              0x4
 #define DWB_OGAM_LUT_CONTROL__DWB_OGAM_LUT_HOST_SEL__SHIFT                                                    0xc
 #define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT                                                                0x10
 #define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS__SHIFT                                                                0x11
 #define DIO_CLK_CNTL__DIO_FGCG_REP_DIS__SHIFT                                                                 0x14
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS__SHIFT                                                          0x15
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS__SHIFT                                                          0x16
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS__SHIFT                                                          0x17
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS__SHIFT                                                          0x18
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS__SHIFT                                                          0x19
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS__SHIFT                                                          0x1a
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS__SHIFT                                                          0x1b
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS__SHIFT                                                          0x1c
 #define DIO_CLK_CNTL__DIO_TEST_CLK_SEL_MASK                                                                   0x0000007FL
 #define DIO_CLK_CNTL__DISPCLK_R_GATE_DIS_MASK                                                                 0x00000200L
 #define DIO_CLK_CNTL__DISPCLK_G_GATE_DIS_MASK                                                                 0x00000400L
 #define DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK                                                                  0x00010000L
 #define DIO_CLK_CNTL__SYMCLK_G_GATE_DIS_MASK                                                                  0x00020000L
 #define DIO_CLK_CNTL__DIO_FGCG_REP_DIS_MASK                                                                   0x00100000L
+
+#define DIO_CLK_CNTL__DISPCLK_G_HDCP_GATE_DIS_MASK                                                            0x00200000L
+#define DIO_CLK_CNTL__SYMCLKA_G_HDCP_GATE_DIS_MASK                                                            0x00400000L
+#define DIO_CLK_CNTL__SYMCLKB_G_HDCP_GATE_DIS_MASK                                                            0x00800000L
+#define DIO_CLK_CNTL__SYMCLKC_G_HDCP_GATE_DIS_MASK                                                            0x01000000L
+#define DIO_CLK_CNTL__SYMCLKD_G_HDCP_GATE_DIS_MASK                                                            0x02000000L
+#define DIO_CLK_CNTL__SYMCLKE_G_HDCP_GATE_DIS_MASK                                                            0x04000000L
+#define DIO_CLK_CNTL__SYMCLKF_G_HDCP_GATE_DIS_MASK                                                            0x08000000L
+#define DIO_CLK_CNTL__SYMCLKG_G_HDCP_GATE_DIS_MASK                                                            0x10000000L
+
 #define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS__SHIFT                                             0x0
 #define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_MESSAGE__SHIFT                                            0x1
 #define DIO_PSP_INTERRUPT_STATUS__DIO_PSP_INTERRUPT_STATUS_MASK                                               0x00000001L
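These __SHIFT/_MASK pairs are always consumed together when reading or updating a single register field. A minimal sketch of the usual read-modify-write pattern (the set_field() helper, the read_reg()/write_reg() accessors, and the REG_DIO_CLK_CNTL offset are hypothetical, for illustration only):

static inline uint32_t set_field(uint32_t reg, uint32_t mask,
				 uint32_t shift, uint32_t val)
{
	/* clear the field, then insert the new value at its bit position */
	return (reg & ~mask) | ((val << shift) & mask);
}

static void dio_gate_symclk_r(void)
{
	uint32_t v = read_reg(REG_DIO_CLK_CNTL);	/* hypothetical accessor */

	v = set_field(v, DIO_CLK_CNTL__SYMCLK_R_GATE_DIS_MASK,
		      DIO_CLK_CNTL__SYMCLK_R_GATE_DIS__SHIFT, 1);
	write_reg(REG_DIO_CLK_CNTL, v);
}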
index e8fae5c77514a257a9b04483e1af2a354a923e57..2bfd6d0ff050033cb972309cba15189268a43c94 100644 (file)
@@ -33,7 +33,7 @@ static inline uint8_t get_u8(void *bios, int ptr)
 #define CU8(ptr) get_u8(ctx->bios, (ptr))
 static inline uint16_t get_u16(void *bios, int ptr)
 {
-    return get_u8(bios ,ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
+    return get_u8(bios, ptr)|(((uint16_t)get_u8(bios, ptr+1))<<8);
 }
 #define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
 #define CU16(ptr) get_u16(ctx->bios, (ptr))
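The byte-wise assembly in get_u16() exists so unaligned BIOS images can be read safely; a quick worked example under the corrected definition:

/* bios[ptr] = 0x34, bios[ptr + 1] = 0x12 (little-endian):
 *   get_u16(bios, ptr) == 0x34 | (0x12 << 8) == 0x1234
 */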
index 26044cb285d29523d1eabcf377040b2fcd2521f7..48542ea6882a168dfb2223e43e4f33de853cb40a 100644 (file)
 #define MAX_SEGMENT                                         6
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 };
 
index 60a6536ff656d70a4ade58c3a84c57798157415c..f40b6a03fe63455c22625fb365450e9928de61db 100644 (file)
@@ -149,27 +149,26 @@ struct cgs_ops {
 
 struct cgs_os_ops; /* To be defined in OS-specific CGS header */
 
-struct cgs_device
-{
+struct cgs_device {
        const struct cgs_ops *ops;
        /* to be embedded at the start of driver private structure */
 };
 
 /* Convenience macros that make CGS indirect function calls look like
  * normal function calls */
-#define CGS_CALL(func,dev,...) \
+#define CGS_CALL(func, dev, ...) \
        (((struct cgs_device *)dev)->ops->func(dev, ##__VA_ARGS__))
-#define CGS_OS_CALL(func,dev,...) \
+#define CGS_OS_CALL(func, dev, ...) \
        (((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
 
-#define cgs_read_register(dev,offset)          \
-       CGS_CALL(read_register,dev,offset)
-#define cgs_write_register(dev,offset,value)           \
-       CGS_CALL(write_register,dev,offset,value)
-#define cgs_read_ind_register(dev,space,index)         \
-       CGS_CALL(read_ind_register,dev,space,index)
-#define cgs_write_ind_register(dev,space,index,value)          \
-       CGS_CALL(write_ind_register,dev,space,index,value)
+#define cgs_read_register(dev, offset)         \
+       CGS_CALL(read_register, dev, offset)
+#define cgs_write_register(dev, offset, value)         \
+       CGS_CALL(write_register, dev, offset, value)
+#define cgs_read_ind_register(dev, space, index)               \
+       CGS_CALL(read_ind_register, dev, space, index)
+#define cgs_write_ind_register(dev, space, index, value)               \
+       CGS_CALL(write_ind_register, dev, space, index, value)
 
 #define cgs_get_firmware_info(dev, type, info) \
        CGS_CALL(get_firmware_info, dev, type, info)
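A usage sketch for the wrappers above, showing how CGS keeps call sites free of vtable plumbing (the register offset is illustrative, not taken from this patch):

static void toggle_bit0(struct cgs_device *dev)
{
	uint32_t v = cgs_read_register(dev, 0x100);	/* illustrative offset */

	cgs_write_register(dev, 0x100, v ^ 0x1);
}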
index ce79e5de8ce3dc320e653b0db9870ac374f1940b..1a73296a9a74a41957514e444747a39c1bc34dec 100644 (file)
 #define MAX_SEGMENT                                        5
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 } __maybe_unused;
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index f84996a73de94dd2f052acbb4428d17b17a975ce..53cb4296df88a32f5cdbb01ffd7fd4f594429894 100644 (file)
 #define MAX_SEGMENT                                         6
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index 1d93a0c574c9e9d3194c5a2a5b4431c90a47aa76..acd1cef61b7c53013672d242d197d77b0a5bfde1 100644 (file)
@@ -27,7 +27,7 @@
 
 #define PP_MAX_CLOCK_LEVELS 16
 
-enum amd_pp_display_config_type{
+enum amd_pp_display_config_type {
        AMD_PP_DisplayConfigType_None = 0,
        AMD_PP_DisplayConfigType_DP54 ,
        AMD_PP_DisplayConfigType_DP432 ,
@@ -36,8 +36,8 @@ enum amd_pp_display_config_type{
        AMD_PP_DisplayConfigType_DP243,
        AMD_PP_DisplayConfigType_DP216,
        AMD_PP_DisplayConfigType_DP162,
-       AMD_PP_DisplayConfigType_HDMI6G ,
-       AMD_PP_DisplayConfigType_HDMI297 ,
+       AMD_PP_DisplayConfigType_HDMI6G,
+       AMD_PP_DisplayConfigType_HDMI297,
        AMD_PP_DisplayConfigType_HDMI162,
        AMD_PP_DisplayConfigType_LVDS,
        AMD_PP_DisplayConfigType_DVI,
@@ -45,8 +45,7 @@ enum amd_pp_display_config_type{
        AMD_PP_DisplayConfigType_VGA
 };
 
-struct single_display_configuration
-{
+struct single_display_configuration {
        uint32_t controller_index;
        uint32_t controller_id;
        uint32_t signal_type;
index edcb85560cede5b42989eb7337e04c9df1fb7233..32054ecf0b87e342453485d86d0329565cddb4c5 100644 (file)
@@ -244,8 +244,7 @@ enum pp_df_cstate {
  * @PP_PWR_LIMIT_DEFAULT: Default Power Limit
  * @PP_PWR_LIMIT_MAX: Maximum Power Limit
  */
-enum pp_power_limit_level
-{
+enum pp_power_limit_level {
        PP_PWR_LIMIT_MIN = -1,
        PP_PWR_LIMIT_CURRENT,
        PP_PWR_LIMIT_DEFAULT,
@@ -260,8 +259,7 @@ enum pp_power_limit_level
  * @PP_PWR_TYPE_FAST: manages the ~10 ms moving average of APU power,
  * where supported.
  */
-enum pp_power_type
-{
+enum pp_power_type {
        PP_PWR_TYPE_SUSTAINED,
        PP_PWR_TYPE_FAST,
 };
index d8fc00478b6a07555490a380f607064577304f60..e94d80ec8d92e8d45e2316cc9d624f7ffa8c967b 100644 (file)
 #define MAX_SEGMENT                                        5
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index c39ef651adc6f738d0cc08e97038b267445a5a45..508011288dea0aafc7159bd199d269b2635f66ed 100644 (file)
 #define MAX_SEGMENT                                        5
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index 5aac8d545bdc6d45ef4719011307c308579f730e..2e8e6c9875f6c619db989b918811d1c21a8a5b7d 100644 (file)
@@ -491,7 +491,7 @@ typedef struct _ClockInfoArray{
     //sizeof(ATOM_PPLIB_CLOCK_INFO)
     UCHAR ucEntrySize;
     
-    UCHAR clockInfo[1];
+    UCHAR clockInfo[];
 }ClockInfoArray;
 
 typedef struct _NonClockInfoArray{
@@ -501,7 +501,7 @@ typedef struct _NonClockInfoArray{
     //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
     UCHAR ucEntrySize;
     
-    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[];
 }NonClockInfoArray;
 
 typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
@@ -658,7 +658,7 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
 
 typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
     UCHAR numEntries;
-    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+    ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[];
 }ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
 
 typedef struct _ATOM_PPLIB_SAMU_Table
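With the [1]-sized trailing arrays converted to C99 flexible-array members, sizeof() no longer counts a phantom first element, and any allocation can be sized explicitly. A sketch assuming the kernel's struct_size() helper and a hypothetical entry count n (in practice these tables are parsed out of the BIOS image rather than allocated by the driver):

static ATOM_PPLIB_SAMClk_Voltage_Limit_Table *alloc_samclk_table(unsigned int n)
{
	ATOM_PPLIB_SAMClk_Voltage_Limit_Table *tbl;

	/* struct_size() accounts for the header plus n trailing entries */
	tbl = kzalloc(struct_size(tbl, entries, n), GFP_KERNEL);
	if (tbl)
		tbl->numEntries = n;
	return tbl;
}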
index 7dff85c81e5a7ea09cf0ed9905bfb101690f45b3..fa023cfdf72d0af952120cf6dad482d0da5002ef 100644 (file)
 #define MAX_SEGMENT                                        5
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index b07bc2dd895dc7d916f6f14efdfbc5f16f25a134..054790470800373613dccfbd2c0cfd7a11e98c64 100644 (file)
 #define MAX_SEGMENT                                         5
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index c0e98a98a641510a9284046aabc505ea11b098c4..58002a83d1dfd3426e76936cd1f3526b082bf563 100644 (file)
@@ -24,8 +24,7 @@
 #ifndef V10_STRUCTS_H_
 #define V10_STRUCTS_H_
 
-struct v10_gfx_mqd
-{
+struct v10_gfx_mqd {
        uint32_t reserved_0; // offset: 0  (0x0)
        uint32_t reserved_1; // offset: 1  (0x1)
        uint32_t reserved_2; // offset: 2  (0x2)
index 691073ed780ecd713c100f76824a126b5c783240..695d7d04dfa60e795e29d9641f1655eb8c7f2b64 100644 (file)
 #define MAX_SEGMENT                                         6
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
index 3a22a5d169193fadf712ba39e4af419a14f1aea9..1e1ca69f21f78a9ea8760ce759fbdb6980df3076 100644 (file)
 #define MAX_INSTANCE                                       5
 #define MAX_SEGMENT                                        5
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 };
 
index 1deb68f3d3341a91ebdaafda1bb6985657cadc1d..92cf2d9e767f8b5659804f3d60d10a63f81eac5f 100644 (file)
 #define MAX_SEGMENT                                        6
 
 
-struct IP_BASE_INSTANCE
-{
+struct IP_BASE_INSTANCE {
     unsigned int segment[MAX_SEGMENT];
 };
 
-struct IP_BASE
-{
+struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
 } __maybe_unused;
 
 
-static const struct IP_BASE ATHUB_BASE            ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0, 0, 0, 0, 0 } },
+                                         { { 0, 0, 0, 0, 0, 0 } },
+                                         { { 0, 0, 0, 0, 0, 0 } },
+                                         { { 0, 0, 0, 0, 0, 0 } },
+                                         { { 0, 0, 0, 0, 0, 0 } },
+                                         { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE CLK_BASE            ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } },
+static const struct IP_BASE DCE_BASE = { { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+                                       { { 0, 0, 0, 0, 0, 0 } },
+                                       { { 0, 0, 0, 0, 0, 0 } },
+                                       { { 0, 0, 0, 0, 0, 0 } },
+                                       { { 0, 0, 0, 0, 0, 0 } },
+                                       { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE DF_BASE = { { { { 0x00007000, 0, 0, 0, 0, 0 } },
+                                      { { 0, 0, 0, 0, 0, 0 } },
+                                      { { 0, 0, 0, 0, 0, 0 } },
+                                      { { 0, 0, 0, 0, 0, 0 } },
+                                      { { 0, 0, 0, 0, 0, 0 } },
+                                      { { 0, 0, 0, 0, 0, 0 } } } };
+static const struct IP_BASE FUSE_BASE = { { { { 0x00017400, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DCE_BASE            ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } },
+static const struct IP_BASE GC_BASE = { { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE DF_BASE            ={ { { { 0x00007000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE HDP_BASE = { { { { 0x00000F20, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE FUSE_BASE            ={ { { { 0x00017400, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MMHUB_BASE = { { { { 0x0001A000, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE GC_BASE            ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } },
+static const struct IP_BASE MP0_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE HDP_BASE            ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE MP1_BASE = { { { { 0x00016000, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MMHUB_BASE            ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP0_BASE            ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE OSSSYS_BASE = { { { { 0x000010A0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE MP1_BASE            ={ { { { 0x00016000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SDMA0_BASE = { { { { 0x00001260, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE NBIO_BASE            ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } },
+static const struct IP_BASE SDMA1_BASE = { { { { 0x00001860, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE OSSSYS_BASE            ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE SMUIO_BASE = { { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA0_BASE            ={ { { { 0x00001260, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE THM_BASE = { { { { 0x00016600, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SDMA1_BASE            ={ { { { 0x00001860, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE UMC_BASE = { { { { 0x00014000, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE SMUIO_BASE            ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE THM_BASE            ={ { { { 0x00016600, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UMC_BASE            ={ { { { 0x00014000, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } },
-                                        { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE UVD_BASE            ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
+static const struct IP_BASE UVD_BASE = { { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } },
                                         { { 0, 0x00009000, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
 /* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files*/
-static const struct IP_BASE VCE_BASE            ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE VCE_BASE = { { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE XDMA_BASE            ={ { { { 0x00003400, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE XDMA_BASE = { { { { 0x00003400, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } } } };
-static const struct IP_BASE RSMU_BASE            ={ { { { 0x00012000, 0, 0, 0, 0, 0 } },
+static const struct IP_BASE RSMU_BASE = { { { { 0x00012000, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
index f503e61faa6008f588c9bc243599238cb3cb7269..b1b4c09c34671e9e284345fdf3316fb4384e1d07 100644 (file)
@@ -226,7 +226,7 @@ int atomctrl_set_engine_dram_timings_rv770(
 
        return amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
-                       (uint32_t *)&engine_clock_parameters);
+                       (uint32_t *)&engine_clock_parameters, sizeof(engine_clock_parameters));
 }
 
 /*
@@ -297,7 +297,7 @@ int atomctrl_get_memory_pll_dividers_si(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-               (uint32_t *)&mpll_parameters);
+               (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
 
        if (0 == result) {
                mpll_param->mpll_fb_divider.clk_frac =
@@ -345,7 +345,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-                       (uint32_t *)&mpll_parameters);
+                       (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
 
        if (!result)
                mpll_param->mpll_post_divider =
@@ -366,7 +366,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
-                       (uint32_t *)&mpll_parameters);
+                       (uint32_t *)&mpll_parameters, sizeof(mpll_parameters));
 
         /* VEGAM's mpll takes some time to finish computing */
        udelay(10);
@@ -396,7 +396,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-               (uint32_t *)&pll_parameters);
+               (uint32_t *)&pll_parameters, sizeof(pll_parameters));
 
        if (0 == result) {
                dividers->pll_post_divider = pll_parameters.ucPostDiv;
@@ -420,7 +420,7 @@ int atomctrl_get_engine_pll_dividers_vi(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-               (uint32_t *)&pll_patameters);
+               (uint32_t *)&pll_patameters, sizeof(pll_patameters));
 
        if (0 == result) {
                dividers->pll_post_divider =
@@ -457,7 +457,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-               (uint32_t *)&pll_patameters);
+               (uint32_t *)&pll_patameters, sizeof(pll_patameters));
 
        if (0 == result) {
                dividers->usSclk_fcw_frac     = le16_to_cpu(pll_patameters.usSclk_fcw_frac);
@@ -490,7 +490,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                 GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
-               (uint32_t *)&pll_patameters);
+               (uint32_t *)&pll_patameters, sizeof(pll_patameters));
 
        if (0 == result) {
                dividers->pll_post_divider =
@@ -773,7 +773,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 
        if (result)
                return result;
@@ -794,7 +794,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 
        if (result)
                return result;
@@ -814,7 +814,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 
        if (result)
                return result;
@@ -835,7 +835,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 
        if (result)
                return result;
@@ -857,7 +857,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
        if (result)
                return result;
 
@@ -878,7 +878,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 
        if (result)
                return result;
@@ -909,7 +909,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&sOutput_FuseValues);
+                       (uint32_t *)&sOutput_FuseValues, sizeof(sOutput_FuseValues));
 
        if (result)
                return result;
@@ -1134,7 +1134,7 @@ int atomctrl_get_voltage_evv_on_sclk(
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-                       (uint32_t *)&get_voltage_info_param_space);
+                       (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
 
        *voltage = result ? 0 :
                        le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
@@ -1179,7 +1179,7 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-                       (uint32_t *)&get_voltage_info_param_space);
+                       (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
 
        if (0 != result)
                return result;
@@ -1359,7 +1359,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
-                       (uint32_t *)&efuse_param);
+                       (uint32_t *)&efuse_param, sizeof(efuse_param));
        *efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask;
 
        return result;
@@ -1380,7 +1380,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
-               (uint32_t *)&memory_clock_parameters);
+               (uint32_t *)&memory_clock_parameters, sizeof(memory_clock_parameters));
 
        return result;
 }
@@ -1399,7 +1399,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
-                       (uint32_t *)&get_voltage_info_param_space);
+                       (uint32_t *)&get_voltage_info_param_space, sizeof(get_voltage_info_param_space));
 
        *voltage = result ? 0 :
                le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
@@ -1526,7 +1526,7 @@ int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual
 
        result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
                        GetIndexIntoMasterTable(COMMAND, SetVoltage),
-                       (uint32_t *)voltage_parameters);
+                       (uint32_t *)voltage_parameters, sizeof(*voltage_parameters));
 
        *virtual_voltage_id = voltage_parameters->usVoltageLevel;
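Note the convention the new argument establishes: callers hand the interpreter the bounds of the parameter block so PS accesses can be range-checked, and when the block is reached through a pointer the size must be sizeof(*ptr), not sizeof(ptr). A condensed sketch using the names above:

	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
			GetIndexIntoMasterTable(COMMAND, SetVoltage),
			(uint32_t *)voltage_parameters,
			sizeof(*voltage_parameters));	/* size of the struct, not the pointer */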
 
index a47a47238e2b9b01a9a423978ff8d2b70918a9d4..82d540334318d29111ec27fa7dedba47f0f36a13 100644 (file)
@@ -258,7 +258,7 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
        idx = GetIndexIntoMasterCmdTable(computegpuclockparam);
 
        if (amdgpu_atom_execute_table(
-               adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters))
+               adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters, sizeof(pll_parameters)))
                return -EINVAL;
 
        pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *)
@@ -505,7 +505,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
        ix = GetIndexIntoMasterCmdTable(getsmuclockinfo);
 
        if (amdgpu_atom_execute_table(
-               adev->mode_info.atom_context, ix, (uint32_t *)&parameters))
+               adev->mode_info.atom_context, ix, (uint32_t *)&parameters, sizeof(parameters)))
                return -EINVAL;
 
        output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&parameters;
index c7bfa68bf00f400f3396c9853d2c08c6bf971659..f6545093bfc14a349138dc43960f21dbc90625ff 100644 (file)
@@ -514,7 +514,7 @@ static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
                                            getsmuclockinfo);
 
        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
-                                       (uint32_t *)&input);
+                                       (uint32_t *)&input, sizeof(input));
        if (ret)
                return -EINVAL;
 
@@ -1432,24 +1432,24 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
                orderly_poweroff(true);
        } else if (client_id == SOC15_IH_CLIENTID_MP1) {
-               if (src_id == 0xfe) {
+               if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
                        /* ACK SMUToHost interrupt */
                        data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
                        data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
                        WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
 
                        switch (ctxid) {
-                       case 0x3:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
                                schedule_work(&smu->interrupt_work);
                                adev->pm.ac_power = true;
                                break;
-                       case 0x4:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
                                schedule_work(&smu->interrupt_work);
                                adev->pm.ac_power = false;
                                break;
-                       case 0x7:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
                                /*
                                 * Increment the throttle interrupt counter
                                 */
@@ -1462,6 +1462,10 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                                        schedule_work(&smu->throttling_logging_work);
 
                                break;
+                       default:
+                               dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+                                                                       ctxid, client_id);
+                               break;
                        }
                }
        }
@@ -1504,7 +1508,7 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
                return ret;
 
        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
-                               0xfe,
+                               SMU_IH_INTERRUPT_ID_TO_DRIVER,
                                irq_src);
        if (ret)
                return ret;
index 5e408a1958604aaa3f590a6e4615bd9c6c346d5f..ed15f5a0fd119f7f821d8411fde458ddf6771c8f 100644 (file)
@@ -301,7 +301,7 @@ static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
                                            getsmuclockinfo);
 
        ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
-                                       (uint32_t *)&input);
+                                       (uint32_t *)&input, sizeof(input));
        if (ret)
                return -EINVAL;
 
index c486182ff275222fedfaa1e27c417f9be80d19d0..48170bb5112ea05ef12c09b66f0ac07c950abc72 100644 (file)
@@ -1369,24 +1369,24 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
                orderly_poweroff(true);
        } else if (client_id == SOC15_IH_CLIENTID_MP1) {
-               if (src_id == 0xfe) {
+               if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
                        /* ACK SMUToHost interrupt */
                        data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
                        data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
                        WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
 
                        switch (ctxid) {
-                       case 0x3:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
                                smu_v13_0_ack_ac_dc_interrupt(smu);
                                adev->pm.ac_power = true;
                                break;
-                       case 0x4:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
                                smu_v13_0_ack_ac_dc_interrupt(smu);
                                adev->pm.ac_power = false;
                                break;
-                       case 0x7:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
                                /*
                                 * Increment the throttle interrupt counter
                                 */
@@ -1399,7 +1399,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                                        schedule_work(&smu->throttling_logging_work);
 
                                break;
-                       case 0x8:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL:
                                high = smu->thermal_range.software_shutdown_temp +
                                        smu->thermal_range.software_shutdown_temp_offset;
                                high = min_t(typeof(high),
@@ -1416,7 +1416,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                                data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
                                WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
                                break;
-                       case 0x9:
+                       case SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY:
                                high = min_t(typeof(high),
                                             SMU_THERMAL_MAXIMUM_ALERT_TEMP,
                                             smu->thermal_range.software_shutdown_temp);
@@ -1429,6 +1429,10 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                                data = data & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
                                WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, data);
                                break;
+                       default:
+                               dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+                                                                       ctxid, client_id);
+                               break;
                        }
                }
        }
@@ -1473,7 +1477,7 @@ int smu_v13_0_register_irq_handler(struct smu_context *smu)
                return ret;
 
        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
-                               0xfe,
+                               SMU_IH_INTERRUPT_ID_TO_DRIVER,
                                irq_src);
        if (ret)
                return ret;
index 7e1941cf17964c594dc8821c3fcda75f64e9f145..1b96158b63bf468e0394cf364650f549598c3725 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/pci.h>
 #include "amdgpu_ras.h"
 #include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
 #include "smu_cmn.h"
 #include "mp/mp_13_0_6_offset.h"
 #include "mp/mp_13_0_6_sh_mask.h"
@@ -1438,7 +1439,10 @@ static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
                                                        entry->src_data[1]);
                                        schedule_work(&smu->throttling_logging_work);
                                }
-
+                               break;
+                       default:
+                               dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
+                                                                       ctxid, client_id);
                                break;
                        }
                }
@@ -2547,18 +2551,22 @@ static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
                                     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
 {
        uint64_t status0;
+       uint32_t ext_error_code;
+       uint32_t odecc_err_cnt;
 
        status0 = entry->regs[MCA_REG_IDX_STATUS];
+       ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status0);
+       odecc_err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]);
 
        if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
                *count = 0;
                return 0;
        }
 
-       if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(adev, status0))
-               *count = 1;
-       else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(adev, status0))
-               *count = 1;
+       if (umc_v12_0_is_deferred_error(adev, status0) ||
+           umc_v12_0_is_uncorrectable_error(adev, status0) ||
+           umc_v12_0_is_correctable_error(adev, status0))
+               *count = (ext_error_code == 0) ? odecc_err_cnt : 1;
 
        return 0;
 }
@@ -2857,6 +2865,143 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
        .mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
 };
 
+static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+
+       return smu_v13_0_6_mca_set_debug_mode(smu, enable);
+}
+
+static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count)
+{
+       uint32_t msg;
+       int ret;
+
+       if (!count)
+               return -EINVAL;
+
+       switch (type) {
+       case ACA_ERROR_TYPE_UE:
+               msg = SMU_MSG_QueryValidMcaCount;
+               break;
+       case ACA_ERROR_TYPE_CE:
+               msg = SMU_MSG_QueryValidMcaCeCount;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = smu_cmn_send_smc_msg(smu, msg, count);
+       if (ret) {
+               *count = 0;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev,
+                                      enum aca_error_type type, u32 *count)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret;
+
+       switch (type) {
+       case ACA_ERROR_TYPE_UE:
+       case ACA_ERROR_TYPE_CE:
+               ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+                                      int idx, int offset, u32 *val)
+{
+       uint32_t msg, param;
+
+       switch (type) {
+       case ACA_ERROR_TYPE_UE:
+               msg = SMU_MSG_McaBankDumpDW;
+               break;
+       case ACA_ERROR_TYPE_CE:
+               msg = SMU_MSG_McaBankCeDumpDW;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
+
+       return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
+}
+
+static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type,
+                                    int idx, int offset, u32 *val, int count)
+{
+       int ret, i;
+
+       if (!val)
+               return -EINVAL;
+
+       for (i = 0; i < count; i++) {
+               ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type,
+                            int idx, int reg_idx, u64 *val)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       u32 data[2] = {0, 0};
+       int ret;
+
+       if (!val || reg_idx >= ACA_REG_IDX_COUNT)
+               return -EINVAL;
+
+       ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
+       if (ret)
+               return ret;
+
+       *val = (u64)data[1] << 32 | data[0];
+
+       dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n",
+               type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val);
+
+       return 0;
+}
+
+static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev,
+                                     enum aca_error_type type, int idx, struct aca_bank *bank)
+{
+       int i, ret, count;
+
+       count = min_t(int, 16, ARRAY_SIZE(bank->regs));
+       for (i = 0; i < count; i++) {
+               ret = aca_bank_read_reg(adev, type, idx, i, &bank->regs[i]);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
+       .max_ue_bank_count = 12,
+       .max_ce_bank_count = 12,
+       .set_debug_mode = aca_smu_set_debug_mode,
+       .get_valid_aca_count = aca_smu_get_valid_aca_count,
+       .get_valid_aca_bank = aca_smu_get_valid_aca_bank,
+};
+
 static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
                                               enum pp_xgmi_plpd_mode mode)
 {
@@ -2895,13 +3040,6 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
        return ret;
 }
 
-static ssize_t smu_v13_0_6_get_ecc_info(struct smu_context *smu,
-                       void *table)
-{
-       /* Support ecc info by default */
-       return 0;
-}
-
 static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
        /* init dpm */
        .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -2956,7 +3094,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
        .i2c_init = smu_v13_0_6_i2c_control_init,
        .i2c_fini = smu_v13_0_6_i2c_control_fini,
        .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
-       .get_ecc_info = smu_v13_0_6_get_ecc_info,
 };
 
 void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -2969,4 +3106,5 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
        smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
        smu_v13_0_set_smu_mailbox_registers(smu);
        amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
+       amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
 }
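The bank-dump message packs its addressing into a single dword: bank index in the upper 16 bits, dword-aligned byte offset in the lower 16 (bits 1:0 masked off). A worked example of the encoding used by __smu_v13_0_6_aca_bank_dump() above, with illustrative values:

	int idx = 3, offset = 16;				/* illustrative */
	uint32_t param = ((idx & 0xffff) << 16) | (offset & 0xfffc);
	/* param == 0x00030010; aca_bank_read_reg() issues two such dumps,
	 * at reg_idx * 8 and reg_idx * 8 + 4, then combines the results as
	 * (u64)hi << 32 | lo. */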
index 4894f7ee737b41dd0e81503b5cb7f3fc1182a6e6..2aa7e9945a0bcfd4c32edeb9ce5f652c42ae8232 100644 (file)
@@ -892,7 +892,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu)
        // TODO: THM related
 
        ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
-                               0xfe,
+                               SMU_IH_INTERRUPT_ID_TO_DRIVER,
                                irq_src);
        if (ret)
                return ret;
index 00cd615bbcdc0bf1b88dbb67e94c86b86173777a..b8dbd4e2534881e3cba8013d25998e40e1396a1a 100644 (file)
@@ -378,8 +378,15 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
        res = __smu_cmn_reg2errno(smu, reg);
        if (res != 0)
                __smu_cmn_reg_print_error(smu, reg, index, param, msg);
-       if (read_arg)
+       if (read_arg) {
                smu_cmn_read_arg(smu, read_arg);
+               dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x,\
+                       readval: 0x%08x\n",
+                       smu_get_message_name(smu, msg), index, param, reg, *read_arg);
+       } else {
+               dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
+                       smu_get_message_name(smu, msg), index, param, reg);
+       }
 Out:
        if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
                amdgpu_device_halt(adev);
index cc590e27d88ac903d6b24dcdef5725f8cf2de76e..81bfce1406e52e4bcaf03e470ba13ebfbc541864 100644 (file)
 #define FDO_PWM_MODE_STATIC  1
 #define FDO_PWM_MODE_STATIC_RPM 5
 
+#define SMU_IH_INTERRUPT_ID_TO_DRIVER                   0xFE
+#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO                0x2
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AC                  0x3
+#define SMU_IH_INTERRUPT_CONTEXT_ID_DC                  0x4
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0            0x5
+#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3            0x6
+#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING  0x7
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL        0x8
+#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY        0x9
+
 extern const int link_speed[];
 
 /* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
index ceb6d772ef94c83bef2fb171a15b252acef1d436..5bc3e6b41c34e2c42a2e5d152ee7eedbd94e8263 100644 (file)
@@ -60,6 +60,7 @@
 typedef struct {
        struct atom_context *ctx;
        uint32_t *ps, *ws;
+       int ps_size, ws_size;
        int ps_shift;
        uint16_t start;
        unsigned last_jump;
@@ -68,8 +69,8 @@ typedef struct {
 } atom_exec_context;
 
 int atom_debug = 0;
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params);
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
 
 static uint32_t atom_arg_mask[8] = {
        0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
@@ -221,7 +222,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
                (*ptr)++;
                /* get_unaligned_le32 avoids unaligned accesses from atombios
                 * tables, noticed on a DEC Alpha. */
-               val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+               if (idx < ctx->ps_size)
+                       val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+               else
+                       pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
                if (print)
                        DEBUG("PS[0x%02X,0x%04X]", idx, val);
                break;
@@ -259,7 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
                        val = gctx->reg_block;
                        break;
                default:
-                       val = ctx->ws[idx];
+                       if (idx < ctx->ws_size)
+                               val = ctx->ws[idx];
+                       else
+                               pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
                }
                break;
        case ATOM_ARG_ID:
@@ -494,6 +501,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
                idx = U8(*ptr);
                (*ptr)++;
                DEBUG("PS[0x%02X]", idx);
+               if (idx >= ctx->ps_size) {
+                       pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
+                       return;
+               }
                ctx->ps[idx] = cpu_to_le32(val);
                break;
        case ATOM_ARG_WS:
@@ -526,6 +537,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
                        gctx->reg_block = val;
                        break;
                default:
+                       if (idx >= ctx->ws_size) {
+                               pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
+                               return;
+                       }
                        ctx->ws[idx] = val;
                }
                break;
@@ -623,7 +638,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
        else
                SDEBUG("   table: %d\n", idx);
        if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
-               r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+               r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
        if (r) {
                ctx->abort = true;
        }
@@ -1152,7 +1167,7 @@ static struct {
        atom_op_shr, ATOM_ARG_MC}, {
 atom_op_debug, 0},};
 
-static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
 {
        int base = CU16(ctx->cmd_table + 4 + 2 * index);
        int len, ws, ps, ptr;
@@ -1174,12 +1189,16 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
        ectx.ps_shift = ps / 4;
        ectx.start = base;
        ectx.ps = params;
+       ectx.ps_size = params_size;
        ectx.abort = false;
        ectx.last_jump = 0;
-       if (ws)
+       if (ws) {
                ectx.ws = kcalloc(4, ws, GFP_KERNEL);
-       else
+               ectx.ws_size = ws;
+       } else {
                ectx.ws = NULL;
+               ectx.ws_size = 0;
+       }
 
        debug_depth++;
        while (1) {
@@ -1212,7 +1231,7 @@ free:
        return ret;
 }
 
-int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
 {
        int r;
 
@@ -1228,16 +1247,16 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin
        /* reset divmul */
        ctx->divmul[0] = 0;
        ctx->divmul[1] = 0;
-       r = atom_execute_table_locked(ctx, index, params);
+       r = atom_execute_table_locked(ctx, index, params, params_size);
        mutex_unlock(&ctx->mutex);
        return r;
 }
 
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
 {
        int r;
        mutex_lock(&ctx->scratch_mutex);
-       r = atom_execute_table_scratch_unlocked(ctx, index, params);
+       r = atom_execute_table_scratch_unlocked(ctx, index, params, params_size);
        mutex_unlock(&ctx->scratch_mutex);
        return r;
 }
@@ -1335,7 +1354,7 @@ int atom_asic_init(struct atom_context *ctx)
 
        if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
                return 1;
-       ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+       ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
        if (ret)
                return ret;
 
@@ -1343,7 +1362,7 @@ int atom_asic_init(struct atom_context *ctx)
 
        if (rdev->family < CHIP_R600) {
                if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
-                       atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
+                       atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps, 16);
        }
        return ret;
 }
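
The atom.c changes above give the AtomBIOS interpreter explicit bounds for
its parameter space (PS) and workspace (WS): the execution context now
records ps_size and ws_size, callers thread a params_size through
atom_execute_table(), and every indexed PS/WS access is validated before the
array is touched, so a malformed table produces a logged error instead of an
out-of-bounds read or write. A minimal sketch of the guard pattern in
isolation (ps_read is a hypothetical helper; the 0 fallback is this sketch's
choice, not necessarily the driver's):

/* Validate the index against the recorded size before dereferencing;
 * out-of-range reads log and fall back to 0 in this sketch. */
static uint32_t ps_read(const uint32_t *ps, int ps_size, int idx)
{
	if (idx >= ps_size) {
		pr_info("PS index out of range: %i >= %i\n", idx, ps_size);
		return 0;
	}
	return get_unaligned_le32(&ps[idx]);
}
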
index 5de0563b63d2ef6bb6583c37ccb32a12b99dcc73..5bf06c0bd6ff3d3c41aedcd64c4bd3a381abd2e0 100644 (file)
@@ -145,8 +145,8 @@ struct atom_context {
 extern int atom_debug;
 
 struct atom_context *atom_parse(struct card_info *, void *);
-int atom_execute_table(struct atom_context *, int, uint32_t *);
-int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
+int atom_execute_table(struct atom_context *, int, uint32_t *, int);
+int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *, int);
 int atom_asic_init(struct atom_context *);
 void atom_destroy(struct atom_context *);
 bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
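
With the widened prototypes above, every caller passes the size of its
parameter block along with the pointer; for the struct-based callers in the
display code that is simply sizeof(args). A caller-side sketch (assuming
ctx, index and crtc_id are in scope; the struct name follows the atombios
headers):

ENABLE_CRTC_PS_ALLOCATION args;

memset(&args, 0, sizeof(args));
args.ucCRTC = crtc_id;
args.ucEnable = ATOM_ENABLE;
/* The buffer size travels with the pointer so the interpreter can
 * reject PS accesses beyond the caller's allocation. */
atom_execute_table(ctx, index, (uint32_t *)&args, sizeof(args));
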
index ade13173921b8f87aaf27f38075d1f74ec5a80e0..9b3a3a9d60e2033f9c9070cb52409b919265a8ba 100644 (file)
@@ -77,7 +77,7 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
                args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
                break;
        }
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void atombios_scaler_setup(struct drm_crtc *crtc)
@@ -157,7 +157,7 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
                        break;
                }
        }
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
        if ((is_tv || is_cv)
            && rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
                atom_rv515_force_tv_scaler(rdev, radeon_crtc);
@@ -178,7 +178,7 @@ static void atombios_lock_crtc(struct drm_crtc *crtc, int lock)
        args.ucCRTC = radeon_crtc->crtc_id;
        args.ucEnable = lock;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
@@ -194,7 +194,7 @@ static void atombios_enable_crtc(struct drm_crtc *crtc, int state)
        args.ucCRTC = radeon_crtc->crtc_id;
        args.ucEnable = state;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
@@ -210,7 +210,7 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
        args.ucCRTC = radeon_crtc->crtc_id;
        args.ucEnable = state;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static const u32 vga_control_regs[6] =
@@ -242,7 +242,7 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
        args.ucCRTC = radeon_crtc->crtc_id;
        args.ucBlanking = state;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        if (ASIC_IS_DCE8(rdev))
                WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
@@ -261,7 +261,7 @@ static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
        args.ucDispPipeId = radeon_crtc->crtc_id;
        args.ucEnable = state;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -343,7 +343,7 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
        args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
        args.ucCRTC = radeon_crtc->crtc_id;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void atombios_crtc_set_timing(struct drm_crtc *crtc,
@@ -389,7 +389,7 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
        args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
        args.ucCRTC = radeon_crtc->crtc_id;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
@@ -546,7 +546,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
                args.lvds_ss.ucSpreadSpectrumStepSize_Delay |= (ss->delay & 7) << 4;
                args.lvds_ss.ucEnable = enable;
        }
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 union adjust_pixel_clock {
@@ -692,7 +692,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                                ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
                                atom_execute_table(rdev->mode_info.atom_context,
-                                                  index, (uint32_t *)&args);
+                                                  index, (uint32_t *)&args, sizeof(args));
                                adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
                                break;
                        case 3:
@@ -725,7 +725,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        args.v3.sInput.ucExtTransmitterID = 0;
 
                                atom_execute_table(rdev->mode_info.atom_context,
-                                                  index, (uint32_t *)&args);
+                                                  index, (uint32_t *)&args, sizeof(args));
                                adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
                                if (args.v3.sOutput.ucRefDiv) {
                                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -809,7 +809,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
                DRM_ERROR("Unknown table version %d %d\n", frev, crev);
                return;
        }
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void atombios_crtc_program_pll(struct drm_crtc *crtc,
@@ -949,7 +949,7 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
                return;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
index 009333645438099e1f83ba1c551d71438261dd3c..fca8b08535a5443c8e20c40453280e0ae5ca8d4d 100644 (file)
@@ -112,7 +112,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
        if (ASIC_IS_DCE4(rdev))
                args.v2.ucHPD_ID = chan->rec.hpd;
 
-       atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        *ack = args.v1.ucReplyStatus;
 
@@ -354,7 +354,7 @@ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
        args.ucLaneNum = lane_num;
        args.ucStatus = 0;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
        return args.ucStatus;
 }
 
index 6e537c5bd2959dc75b70cb406569f3e2295bbe30..2bff0d9e20f530f7ea62a86d06530459c6d2ecb9 100644 (file)
@@ -119,12 +119,12 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
                        index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
                        if (dig->backlight_level == 0) {
                                args.ucAction = ATOM_LCD_BLOFF;
-                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                        } else {
                                args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
-                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                                args.ucAction = ATOM_LCD_BLON;
-                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                        }
                        break;
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -389,7 +389,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
        }
        args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
 }
 
@@ -445,7 +445,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
 
        args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
 }
 
@@ -546,7 +546,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
                break;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 union lvds_encoder_control {
@@ -664,7 +664,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                break;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 int
@@ -979,7 +979,7 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
                break;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
 }
 
@@ -1361,7 +1361,7 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
                break;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void
@@ -1397,7 +1397,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
 
        args.v1.ucAction = action;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        /* wait for the panel to power up */
        if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1519,7 +1519,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
                DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
                return;
        }
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 static void
@@ -1554,7 +1554,7 @@ atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
                args.ucEnable = ATOM_ENABLE;
        args.ucCRTC = radeon_crtc->crtc_id;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        WREG32(reg, temp);
 }
@@ -1618,10 +1618,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
                if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
                        u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
                        WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                        WREG32(RADEON_BIOS_3_SCRATCH, reg);
                } else
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                        if (rdev->mode_info.bl_encoder) {
                                struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -1629,7 +1629,7 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
                                atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
                        } else {
                                args.ucAction = ATOM_LCD_BLON;
-                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                        }
                }
                break;
@@ -1637,10 +1637,10 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
                args.ucAction = ATOM_DISABLE;
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
                        args.ucAction = ATOM_LCD_BLOFF;
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
                }
                break;
        }
@@ -1983,7 +1983,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
                return;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        /* update scratch regs with new routing */
        radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
@@ -2311,7 +2311,7 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
                                args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
                }
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                return true;
        } else
index ab4d21072191f949e6346bde18b298fedc9b9eb8..730f0b25312b722dc813e579bd0bf6362c6a10c1 100644 (file)
@@ -78,7 +78,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
        args.ucSlaveAddr = slave_addr << 1;
        args.ucLineNumber = chan->rec.i2c_id;
 
-       atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        /* error */
        if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
index 4e64ed38c439fa1b1d5cc8e6d11ac65989220236..70931b04bbac8840476f2d5a2685847e82964a6e 100644 (file)
@@ -53,8 +53,7 @@
 extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
 //********* BARTS **************//
-static const u32 barts_cgcg_cgls_default[] =
-{
+static const u32 barts_cgcg_cgls_default[] = {
        /* Register,   Value,     Mask bits */
        0x000008f8, 0x00000010, 0xffffffff,
        0x000008fc, 0x00000000, 0xffffffff,
@@ -107,8 +106,7 @@ static const u32 barts_cgcg_cgls_default[] =
 };
 #define BARTS_CGCG_CGLS_DEFAULT_LENGTH sizeof(barts_cgcg_cgls_default) / (3 * sizeof(u32))
 
-static const u32 barts_cgcg_cgls_disable[] =
-{
+static const u32 barts_cgcg_cgls_disable[] = {
        0x000008f8, 0x00000010, 0xffffffff,
        0x000008fc, 0xffffffff, 0xffffffff,
        0x000008f8, 0x00000011, 0xffffffff,
@@ -162,8 +160,7 @@ static const u32 barts_cgcg_cgls_disable[] =
 };
 #define BARTS_CGCG_CGLS_DISABLE_LENGTH sizeof(barts_cgcg_cgls_disable) / (3 * sizeof(u32))
 
-static const u32 barts_cgcg_cgls_enable[] =
-{
+static const u32 barts_cgcg_cgls_enable[] = {
        /* 0x0000c124, 0x84180000, 0x00180000, */
        0x00000644, 0x000f7892, 0x001f4080,
        0x000008f8, 0x00000010, 0xffffffff,
@@ -217,8 +214,7 @@ static const u32 barts_cgcg_cgls_enable[] =
 };
 #define BARTS_CGCG_CGLS_ENABLE_LENGTH sizeof(barts_cgcg_cgls_enable) / (3 * sizeof(u32))
 
-static const u32 barts_mgcg_default[] =
-{
+static const u32 barts_mgcg_default[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x00005448, 0x00000100, 0xffffffff,
        0x000055e4, 0x00600100, 0xffffffff,
@@ -366,8 +362,7 @@ static const u32 barts_mgcg_default[] =
 };
 #define BARTS_MGCG_DEFAULT_LENGTH sizeof(barts_mgcg_default) / (3 * sizeof(u32))
 
-static const u32 barts_mgcg_disable[] =
-{
+static const u32 barts_mgcg_disable[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x000008f8, 0x00000000, 0xffffffff,
        0x000008fc, 0xffffffff, 0xffffffff,
@@ -381,8 +376,7 @@ static const u32 barts_mgcg_disable[] =
 };
 #define BARTS_MGCG_DISABLE_LENGTH sizeof(barts_mgcg_disable) / (3 * sizeof(u32))
 
-static const u32 barts_mgcg_enable[] =
-{
+static const u32 barts_mgcg_enable[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x000008f8, 0x00000000, 0xffffffff,
        0x000008fc, 0x00000000, 0xffffffff,
@@ -397,8 +391,7 @@ static const u32 barts_mgcg_enable[] =
 #define BARTS_MGCG_ENABLE_LENGTH sizeof(barts_mgcg_enable) / (3 * sizeof(u32))
 
 //********* CAICOS **************//
-static const u32 caicos_cgcg_cgls_default[] =
-{
+static const u32 caicos_cgcg_cgls_default[] = {
        0x000008f8, 0x00000010, 0xffffffff,
        0x000008fc, 0x00000000, 0xffffffff,
        0x000008f8, 0x00000011, 0xffffffff,
@@ -450,8 +443,7 @@ static const u32 caicos_cgcg_cgls_default[] =
 };
 #define CAICOS_CGCG_CGLS_DEFAULT_LENGTH sizeof(caicos_cgcg_cgls_default) / (3 * sizeof(u32))
 
-static const u32 caicos_cgcg_cgls_disable[] =
-{
+static const u32 caicos_cgcg_cgls_disable[] = {
        0x000008f8, 0x00000010, 0xffffffff,
        0x000008fc, 0xffffffff, 0xffffffff,
        0x000008f8, 0x00000011, 0xffffffff,
@@ -505,8 +497,7 @@ static const u32 caicos_cgcg_cgls_disable[] =
 };
 #define CAICOS_CGCG_CGLS_DISABLE_LENGTH sizeof(caicos_cgcg_cgls_disable) / (3 * sizeof(u32))
 
-static const u32 caicos_cgcg_cgls_enable[] =
-{
+static const u32 caicos_cgcg_cgls_enable[] = {
        /* 0x0000c124, 0x84180000, 0x00180000, */
        0x00000644, 0x000f7892, 0x001f4080,
        0x000008f8, 0x00000010, 0xffffffff,
@@ -560,8 +551,7 @@ static const u32 caicos_cgcg_cgls_enable[] =
 };
 #define CAICOS_CGCG_CGLS_ENABLE_LENGTH sizeof(caicos_cgcg_cgls_enable) / (3 * sizeof(u32))
 
-static const u32 caicos_mgcg_default[] =
-{
+static const u32 caicos_mgcg_default[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x00005448, 0x00000100, 0xffffffff,
        0x000055e4, 0x00600100, 0xffffffff,
@@ -640,8 +630,7 @@ static const u32 caicos_mgcg_default[] =
 };
 #define CAICOS_MGCG_DEFAULT_LENGTH sizeof(caicos_mgcg_default) / (3 * sizeof(u32))
 
-static const u32 caicos_mgcg_disable[] =
-{
+static const u32 caicos_mgcg_disable[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x000008f8, 0x00000000, 0xffffffff,
        0x000008fc, 0xffffffff, 0xffffffff,
@@ -655,8 +644,7 @@ static const u32 caicos_mgcg_disable[] =
 };
 #define CAICOS_MGCG_DISABLE_LENGTH sizeof(caicos_mgcg_disable) / (3 * sizeof(u32))
 
-static const u32 caicos_mgcg_enable[] =
-{
+static const u32 caicos_mgcg_enable[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x000008f8, 0x00000000, 0xffffffff,
        0x000008fc, 0x00000000, 0xffffffff,
@@ -671,8 +659,7 @@ static const u32 caicos_mgcg_enable[] =
 #define CAICOS_MGCG_ENABLE_LENGTH sizeof(caicos_mgcg_enable) / (3 * sizeof(u32))
 
 //********* TURKS **************//
-static const u32 turks_cgcg_cgls_default[] =
-{
+static const u32 turks_cgcg_cgls_default[] = {
        0x000008f8, 0x00000010, 0xffffffff,
        0x000008fc, 0x00000000, 0xffffffff,
        0x000008f8, 0x00000011, 0xffffffff,
@@ -724,8 +711,7 @@ static const u32 turks_cgcg_cgls_default[] =
 };
 #define TURKS_CGCG_CGLS_DEFAULT_LENGTH  sizeof(turks_cgcg_cgls_default) / (3 * sizeof(u32))
 
-static const u32 turks_cgcg_cgls_disable[] =
-{
+static const u32 turks_cgcg_cgls_disable[] = {
        0x000008f8, 0x00000010, 0xffffffff,
        0x000008fc, 0xffffffff, 0xffffffff,
        0x000008f8, 0x00000011, 0xffffffff,
@@ -779,8 +765,7 @@ static const u32 turks_cgcg_cgls_disable[] =
 };
 #define TURKS_CGCG_CGLS_DISABLE_LENGTH sizeof(turks_cgcg_cgls_disable) / (3 * sizeof(u32))
 
-static const u32 turks_cgcg_cgls_enable[] =
-{
+static const u32 turks_cgcg_cgls_enable[] = {
        /* 0x0000c124, 0x84180000, 0x00180000, */
        0x00000644, 0x000f7892, 0x001f4080,
        0x000008f8, 0x00000010, 0xffffffff,
@@ -835,8 +820,7 @@ static const u32 turks_cgcg_cgls_enable[] =
 #define TURKS_CGCG_CGLS_ENABLE_LENGTH sizeof(turks_cgcg_cgls_enable) / (3 * sizeof(u32))
 
 // These are the sequences for turks_mgcg_shls
-static const u32 turks_mgcg_default[] =
-{
+static const u32 turks_mgcg_default[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x00005448, 0x00000100, 0xffffffff,
        0x000055e4, 0x00600100, 0xffffffff,
@@ -935,8 +919,7 @@ static const u32 turks_mgcg_default[] =
 };
 #define TURKS_MGCG_DEFAULT_LENGTH sizeof(turks_mgcg_default) / (3 * sizeof(u32))
 
-static const u32 turks_mgcg_disable[] =
-{
+static const u32 turks_mgcg_disable[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x000008f8, 0x00000000, 0xffffffff,
        0x000008fc, 0xffffffff, 0xffffffff,
@@ -950,8 +933,7 @@ static const u32 turks_mgcg_disable[] =
 };
 #define TURKS_MGCG_DISABLE_LENGTH sizeof(turks_mgcg_disable) / (3 * sizeof(u32))
 
-static const u32 turks_mgcg_enable[] =
-{
+static const u32 turks_mgcg_enable[] = {
        0x0000802c, 0xc0000000, 0xffffffff,
        0x000008f8, 0x00000000, 0xffffffff,
        0x000008fc, 0x00000000, 0xffffffff,
@@ -972,8 +954,7 @@ static const u32 turks_mgcg_enable[] =
 
 
 //********* BARTS **************//
-static const u32 barts_sysls_default[] =
-{
+static const u32 barts_sysls_default[] = {
        /* Register,   Value,     Mask bits */
        0x000055e8, 0x00000000, 0xffffffff,
        0x0000d0bc, 0x00000000, 0xffffffff,
@@ -993,8 +974,7 @@ static const u32 barts_sysls_default[] =
 };
 #define BARTS_SYSLS_DEFAULT_LENGTH sizeof(barts_sysls_default) / (3 * sizeof(u32))
 
-static const u32 barts_sysls_disable[] =
-{
+static const u32 barts_sysls_disable[] = {
        0x000055e8, 0x00000000, 0xffffffff,
        0x0000d0bc, 0x00000000, 0xffffffff,
        0x000015c0, 0x00041401, 0xffffffff,
@@ -1013,8 +993,7 @@ static const u32 barts_sysls_disable[] =
 };
 #define BARTS_SYSLS_DISABLE_LENGTH sizeof(barts_sysls_disable) / (3 * sizeof(u32))
 
-static const u32 barts_sysls_enable[] =
-{
+static const u32 barts_sysls_enable[] = {
        0x000055e8, 0x00000001, 0xffffffff,
        0x0000d0bc, 0x00000100, 0xffffffff,
        0x000015c0, 0x000c1401, 0xffffffff,
@@ -1034,8 +1013,7 @@ static const u32 barts_sysls_enable[] =
 #define BARTS_SYSLS_ENABLE_LENGTH sizeof(barts_sysls_enable) / (3 * sizeof(u32))
 
 //********* CAICOS **************//
-static const u32 caicos_sysls_default[] =
-{
+static const u32 caicos_sysls_default[] = {
        0x000055e8, 0x00000000, 0xffffffff,
        0x0000d0bc, 0x00000000, 0xffffffff,
        0x000015c0, 0x000c1401, 0xffffffff,
@@ -1053,8 +1031,7 @@ static const u32 caicos_sysls_default[] =
 };
 #define CAICOS_SYSLS_DEFAULT_LENGTH sizeof(caicos_sysls_default) / (3 * sizeof(u32))
 
-static const u32 caicos_sysls_disable[] =
-{
+static const u32 caicos_sysls_disable[] = {
        0x000055e8, 0x00000000, 0xffffffff,
        0x0000d0bc, 0x00000000, 0xffffffff,
        0x000015c0, 0x00041401, 0xffffffff,
@@ -1072,8 +1049,7 @@ static const u32 caicos_sysls_disable[] =
 };
 #define CAICOS_SYSLS_DISABLE_LENGTH sizeof(caicos_sysls_disable) / (3 * sizeof(u32))
 
-static const u32 caicos_sysls_enable[] =
-{
+static const u32 caicos_sysls_enable[] = {
        0x000055e8, 0x00000001, 0xffffffff,
        0x0000d0bc, 0x00000100, 0xffffffff,
        0x000015c0, 0x000c1401, 0xffffffff,
@@ -1092,8 +1068,7 @@ static const u32 caicos_sysls_enable[] =
 #define CAICOS_SYSLS_ENABLE_LENGTH sizeof(caicos_sysls_enable) / (3 * sizeof(u32))
 
 //********* TURKS **************//
-static const u32 turks_sysls_default[] =
-{
+static const u32 turks_sysls_default[] = {
        0x000055e8, 0x00000000, 0xffffffff,
        0x0000d0bc, 0x00000000, 0xffffffff,
        0x000015c0, 0x000c1401, 0xffffffff,
@@ -1112,8 +1087,7 @@ static const u32 turks_sysls_default[] =
 };
 #define TURKS_SYSLS_DEFAULT_LENGTH sizeof(turks_sysls_default) / (3 * sizeof(u32))
 
-static const u32 turks_sysls_disable[] =
-{
+static const u32 turks_sysls_disable[] = {
        0x000055e8, 0x00000000, 0xffffffff,
        0x0000d0bc, 0x00000000, 0xffffffff,
        0x000015c0, 0x00041401, 0xffffffff,
@@ -1132,8 +1106,7 @@ static const u32 turks_sysls_disable[] =
 };
 #define TURKS_SYSLS_DISABLE_LENGTH sizeof(turks_sysls_disable) / (3 * sizeof(u32))
 
-static const u32 turks_sysls_enable[] =
-{
+static const u32 turks_sysls_enable[] = {
        0x000055e8, 0x00000001, 0xffffffff,
        0x0000d0bc, 0x00000100, 0xffffffff,
        0x000015c0, 0x000c1401, 0xffffffff,
@@ -1154,8 +1127,7 @@ static const u32 turks_sysls_enable[] =
 
 #endif
 
-u32 btc_valid_sclk[40] =
-{
+u32 btc_valid_sclk[40] = {
        5000,   10000,  15000,  20000,  25000,  30000,  35000,  40000,  45000,  50000,
        55000,  60000,  65000,  70000,  75000,  80000,  85000,  90000,  95000,  100000,
        105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000, 145000, 150000,
@@ -1194,7 +1166,7 @@ void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_t
        if ((table == NULL) || (table->count == 0))
                return;
 
-       for (i= 0; i < table->count; i++) {
+       for (i = 0; i < table->count; i++) {
                if (clock <= table->entries[i].clk) {
                        if (*voltage < table->entries[i].v)
                                *voltage = (u16)((table->entries[i].v < max_voltage) ?
@@ -1441,7 +1413,7 @@ void btc_program_mgcg_hw_sequence(struct radeon_device *rdev,
        u32 i, length = count * 3;
        u32 tmp;
 
-       for (i = 0; i < length; i+=3) {
+       for (i = 0; i < length; i += 3) {
                tmp = RREG32(sequence[i]);
                tmp &= ~sequence[i+2];
                tmp |= sequence[i+1] & sequence[i+2];
@@ -2003,7 +1975,7 @@ static int btc_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
        for (i = 0; i < table->num_entries; i++) {
                eg_table->mc_reg_table_entry[i].mclk_max =
                        table->mc_reg_table_entry[i].mclk_max;
-               for(j = 0; j < table->last; j++)
+               for (j = 0; j < table->last; j++)
                        eg_table->mc_reg_table_entry[i].mc_data[j] =
                                table->mc_reg_table_entry[i].mc_data[j];
        }
index b8f4dac68d85043f76bdb440d5af47058950dc28..abe9d65cc4605ec0c37335a41d424ac34022bc67 100644 (file)
 #define VOLTAGE_VID_OFFSET_SCALE1    625
 #define VOLTAGE_VID_OFFSET_SCALE2    100
 
-static const struct ci_pt_defaults defaults_hawaii_xt =
-{
+static const struct ci_pt_defaults defaults_hawaii_xt = {
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
 };
 
-static const struct ci_pt_defaults defaults_hawaii_pro =
-{
+static const struct ci_pt_defaults defaults_hawaii_pro = {
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
 };
 
-static const struct ci_pt_defaults defaults_bonaire_xt =
-{
+static const struct ci_pt_defaults defaults_bonaire_xt = {
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
-static const struct ci_pt_defaults defaults_saturn_xt =
-{
+static const struct ci_pt_defaults defaults_saturn_xt = {
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
 };
 
-static const struct ci_pt_config_reg didt_config_ci[] =
-{
+static const struct ci_pt_config_reg didt_config_ci[] = {
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
@@ -1216,7 +1211,7 @@ static void ci_thermal_initialize(struct radeon_device *rdev)
 
        if (rdev->pm.fan_pulses_per_revolution) {
                tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
-               tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
+               tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
                WREG32_SMC(CG_TACH_CTRL, tmp);
        }
 
@@ -3333,7 +3328,7 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
 }
 
 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
-                                     struct ci_single_dpm_table *dpm_table,
+                                     struct ci_single_dpm_table *dpm_table,
                                      u32 count)
 {
        u32 i;
@@ -3343,7 +3338,7 @@ static void ci_reset_single_dpm_table(struct radeon_device *rdev,
                dpm_table->dpm_levels[i].enabled = false;
 }
 
-static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
                                      u32 index, u32 pcie_gen, u32 pcie_lanes)
 {
        dpm_table->dpm_levels[index].value = pcie_gen;
@@ -3503,7 +3498,7 @@ static int ci_find_boot_level(struct ci_single_dpm_table *table,
        u32 i;
        int ret = -EINVAL;
 
-       for(i = 0; i < table->count; i++) {
+       for (i = 0; i < table->count; i++) {
                if (value == table->dpm_levels[i].value) {
                        *boot_level = i;
                        ret = 0;
@@ -4304,7 +4299,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev,
        for (i = 0, j = table->last; i < table->last; i++) {
                if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                        return -EINVAL;
-               switch(table->mc_reg_address[i].s1 << 2) {
+               switch (table->mc_reg_address[i].s1 << 2) {
                case MC_SEQ_MISC1:
                        temp_reg = RREG32(MC_PMG_CMD_EMRS);
                        table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
@@ -4369,7 +4364,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
 {
        bool result = true;
 
-       switch(in_reg) {
+       switch (in_reg) {
        case MC_SEQ_RAS_TIMING >> 2:
                *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
                break;
@@ -4508,7 +4503,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
                for (i = 0; i < table->last; i++) {
                        if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
-                       switch(table->mc_reg_address[i].s1 >> 2) {
+                       switch (table->mc_reg_address[i].s1 >> 2) {
                        case MC_SEQ_MISC1:
                                for (k = 0; k < table->num_entries; k++) {
                                        if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
@@ -4683,7 +4678,7 @@ static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
        struct ci_power_info *pi = ci_get_pi(rdev);
        u32 i = 0;
 
-       for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
+       for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
                if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
                        break;
        }
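
Several loops touched above are plain linear scans over DPM tables;
ci_find_boot_level(), for example, searches for an exact value match and
reports the level index through an out-parameter. In isolation the pattern
looks like this (find_dpm_level is a hypothetical name):

/* Return 0 and store the matching level index, or -EINVAL when
 * 'value' does not appear in the table. */
static int find_dpm_level(const struct ci_single_dpm_table *table,
			  u32 value, u32 *level)
{
	u32 i;

	for (i = 0; i < table->count; i++) {
		if (table->dpm_levels[i].value == value) {
			*level = i;
			return 0;
		}
	}
	return -EINVAL;
}
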
index ac12db5f2cf7bbbc578eff2a4203acdf1f4fafe1..74b95c200222b580d755262c7bcb8f8d88ebc4c3 100644 (file)
@@ -87,8 +87,7 @@ struct ci_mc_reg_table {
        SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
 };
 
-struct ci_ulv_parm
-{
+struct ci_ulv_parm {
        bool supported;
        u32 cg_ulv_parameter;
        u32 volt_change_delay;
@@ -113,8 +112,7 @@ struct ci_dpm_level_enable_mask {
        u32 pcie_dpm_enable_mask;
 };
 
-struct ci_vbios_boot_state
-{
+struct ci_vbios_boot_state {
        u16 mvdd_bootup_value;
        u16 vddc_bootup_value;
        u16 vddci_bootup_value;
index 4774e04c4da65345719b50b2f5c3b715138a640a..7693fb6624a382ad96fcefeb2615fec86d71a35c 100644 (file)
@@ -23,8 +23,7 @@
 
 #include "clearstate_defs.h"
 
-static const u32 SECT_CONTEXT_def_1[] =
-{
+static const u32 SECT_CONTEXT_def_1[] = {
     0x00000000, // DB_RENDER_CONTROL
     0x00000000, // DB_COUNT_CONTROL
     0x00000000, // DB_DEPTH_VIEW
@@ -514,8 +513,7 @@ static const u32 SECT_CONTEXT_def_1[] =
     0x00000000, // CB_BLEND6_CONTROL
     0x00000000, // CB_BLEND7_CONTROL
 };
-static const u32 SECT_CONTEXT_def_2[] =
-{
+static const u32 SECT_CONTEXT_def_2[] = {
     0x00000000, // PA_CL_POINT_X_RAD
     0x00000000, // PA_CL_POINT_Y_RAD
     0x00000000, // PA_CL_POINT_SIZE
@@ -523,8 +521,7 @@ static const u32 SECT_CONTEXT_def_2[] =
     0x00000000, // VGT_DMA_BASE_HI
     0x00000000, // VGT_DMA_BASE
 };
-static const u32 SECT_CONTEXT_def_3[] =
-{
+static const u32 SECT_CONTEXT_def_3[] = {
     0x00000000, // DB_DEPTH_CONTROL
     0x00000000, // DB_EQAA
     0x00000000, // CB_COLOR_CONTROL
index c1b6c22dbed7367b01a1e19fc9ad216420fb5981..0045d42aa27cfa2234503c870cef9b406941b27d 100644 (file)
@@ -23,8 +23,7 @@
 
 #include "clearstate_defs.h"
 
-static const unsigned int ci_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int ci_SECT_CONTEXT_def_1[] = {
     0x00000000, // DB_RENDER_CONTROL
     0x00000000, // DB_COUNT_CONTROL
     0x00000000, // DB_DEPTH_VIEW
index a424b86008b88518ec581720b9262153d8d9d8c4..c634dc28e6c300786224f0af8b38107ae02a03af 100644 (file)
@@ -2514,8 +2514,7 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
        WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
-static const unsigned ni_dig_offsets[] =
-{
+static const unsigned ni_dig_offsets[] = {
        NI_DIG0_REGISTER_OFFSET,
        NI_DIG1_REGISTER_OFFSET,
        NI_DIG2_REGISTER_OFFSET,
@@ -2524,8 +2523,7 @@ static const unsigned ni_dig_offsets[] =
        NI_DIG5_REGISTER_OFFSET
 };
 
-static const unsigned ni_tx_offsets[] =
-{
+static const unsigned ni_tx_offsets[] = {
        NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
        NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
        NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
@@ -2534,8 +2532,7 @@ static const unsigned ni_tx_offsets[] =
        NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
 };
 
-static const unsigned evergreen_dp_offsets[] =
-{
+static const unsigned evergreen_dp_offsets[] = {
        EVERGREEN_DP0_REGISTER_OFFSET,
        EVERGREEN_DP1_REGISTER_OFFSET,
        EVERGREEN_DP2_REGISTER_OFFSET,
@@ -2544,8 +2541,7 @@ static const unsigned evergreen_dp_offsets[] =
        EVERGREEN_DP5_REGISTER_OFFSET
 };
 
-static const unsigned evergreen_disp_int_status[] =
-{
+static const unsigned evergreen_disp_int_status[] = {
        DISP_INTERRUPT_STATUS,
        DISP_INTERRUPT_STATUS_CONTINUE,
        DISP_INTERRUPT_STATUS_CONTINUE2,
@@ -2643,7 +2639,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
                return;
        }
 
-       stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+       stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
        WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
               evergreen_dp_offsets[dig_fe], stream_ctrl);
 
@@ -2655,7 +2651,7 @@ static void evergreen_blank_dp_output(struct radeon_device *rdev,
                stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
                                     evergreen_dp_offsets[dig_fe]);
        }
-       if (counter >= 32 )
+       if (counter >= 32)
                DRM_ERROR("counter exceeds %d\n", counter);
 
        fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
@@ -2716,7 +2712,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                        /*for now we do it this manually*/
                        /**/
                        if (ASIC_IS_DCE5(rdev) &&
-                           evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+                           evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
                                evergreen_blank_dp_output(rdev, dig_fe);
                        /*we could remove 6 lines below*/
                        /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@@ -3597,7 +3593,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
        sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
 
-       sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2))* 12 / 32);
+       sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
        sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
        sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
        sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
index 0de79f3a7e3ffc4f7e423d8448970e4106b55d3a..1fe6e0d883c79be07817a3cf582f128c9d5d97dd 100644 (file)
@@ -33,8 +33,8 @@
 #include "evergreen_reg_safe.h"
 #include "cayman_reg_safe.h"
 
-#define MAX(a,b)                   (((a)>(b))?(a):(b))
-#define MIN(a,b)                   (((a)<(b))?(a):(b))
+#define MAX(a, b)                   (((a) > (b)) ? (a) : (b))
+#define MIN(a, b)                   (((a) < (b)) ? (a) : (b))
 
 #define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
 
index b436badf9efa356e00b80adee5cda4f191c5b537..3ff9fda54aa301a75c3b3f68babc1611cd9050a3 100644 (file)
 
 
 #define NI_DIG_BE_CNTL                    0x7140
-#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8 ) & 0x3F)
-#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7 )
+#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8) & 0x3F)
+#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7)
 
 #define NI_DIG_BE_EN_CNTL                              0x7144
 #       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
 
 #define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
 #       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
-#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 <<16)
+#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 << 16)
 #define EVERGREEN_DP_STEER_FIFO                         0x7310
 #       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
 #define EVERGREEN_DP_SEC_CNTL                           0x7280
 #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
 
 /*DCIO_UNIPHY block*/
-#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600  -0x6600)
-#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640  -0x6600)
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640 - 0x6600)
 #define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
 #define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
 #define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
index 3a03ba37d04346fb5200d67ee002716ae549a600..b34d54b567b746933ce9f0ef2f8e6dea7917983d 100644 (file)
@@ -29,8 +29,7 @@
 
 #define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
 
-struct SMC_Evergreen_MCRegisterAddress
-{
+struct SMC_Evergreen_MCRegisterAddress {
     uint16_t s0;
     uint16_t s1;
 };
@@ -38,15 +37,13 @@ struct SMC_Evergreen_MCRegisterAddress
 typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
 
 
-struct SMC_Evergreen_MCRegisterSet
-{
+struct SMC_Evergreen_MCRegisterSet {
     uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
 };
 
 typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
 
-struct SMC_Evergreen_MCRegisters
-{
+struct SMC_Evergreen_MCRegisters {
     uint8_t                             last;
     uint8_t                             reserved[3];
     SMC_Evergreen_MCRegisterAddress     address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
index f7735da07feb93aea8c9e27a48f3f7566a974ded..55dbf450bd9ca35bf42e7354dab22bfb432d84d1 100644 (file)
@@ -64,8 +64,7 @@ extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
 extern void cik_update_cg(struct radeon_device *rdev,
                          u32 block, bool enable);
 
-static const struct kv_pt_config_reg didt_config_kv[] =
-{
+static const struct kv_pt_config_reg didt_config_kv[] = {
        { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
@@ -931,9 +930,9 @@ static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
                                        pi->graphics_level[i].ClkBypassCntl = 2;
                                else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 7;
-                               else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
+                               else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 6;
-                               else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
+                               else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
                                        pi->graphics_level[i].ClkBypassCntl = 8;
                                else
                                        pi->graphics_level[i].ClkBypassCntl = 0;
@@ -1577,7 +1576,7 @@ static void kv_set_valid_clock_range(struct radeon_device *rdev,
                        if ((new_ps->levels[0].sclk -
                             table->entries[pi->highest_valid].sclk_frequency) >
                            (table->entries[pi->lowest_valid].sclk_frequency -
-                            new_ps->levels[new_ps->num_levels -1].sclk))
+                            new_ps->levels[new_ps->num_levels - 1].sclk))
                                pi->highest_valid = pi->lowest_valid;
                        else
                                pi->lowest_valid =  pi->highest_valid;
index c0a59527e7b8e329d6f7e9d2727c7cd31d1b8490..65831cca6730f0e5ee7f55f38616b60063a6a180 100644 (file)
@@ -189,7 +189,7 @@ int kv_copy_bytes_to_smc(struct radeon_device *rdev,
                if (ret)
                        return ret;
 
-               original_data= RREG32(SMC_IND_DATA_0);
+               original_data = RREG32(SMC_IND_DATA_0);
 
                extra_shift = 8 * (4 - byte_count);
 
index 927e5f42e97d018240b5204c9d0dd7339391292a..9f0881ab3105c683471fec3a0adb42240388024d 100644 (file)
@@ -66,8 +66,7 @@ void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
        spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
 }
 
-static const u32 tn_rlc_save_restore_register_list[] =
-{
+static const u32 tn_rlc_save_restore_register_list[] = {
        0x98fc,
        0x98f0,
        0x9834,
@@ -216,8 +215,7 @@ MODULE_FIRMWARE("radeon/ARUBA_me.bin");
 MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
 
 
-static const u32 cayman_golden_registers2[] =
-{
+static const u32 cayman_golden_registers2[] = {
        0x3e5c, 0xffffffff, 0x00000000,
        0x3e48, 0xffffffff, 0x00000000,
        0x3e4c, 0xffffffff, 0x00000000,
@@ -226,8 +224,7 @@ static const u32 cayman_golden_registers2[] =
        0x3e60, 0xffffffff, 0x00000000
 };
 
-static const u32 cayman_golden_registers[] =
-{
+static const u32 cayman_golden_registers[] = {
        0x5eb4, 0xffffffff, 0x00000002,
        0x5e78, 0x8f311ff1, 0x001000f0,
        0x3f90, 0xffff0000, 0xff000000,
@@ -267,16 +264,14 @@ static const u32 cayman_golden_registers[] =
        0x8974, 0xffffffff, 0x00000000
 };
 
-static const u32 dvst_golden_registers2[] =
-{
+static const u32 dvst_golden_registers2[] = {
        0x8f8, 0xffffffff, 0,
        0x8fc, 0x00380000, 0,
        0x8f8, 0xffffffff, 1,
        0x8fc, 0x0e000000, 0
 };
 
-static const u32 dvst_golden_registers[] =
-{
+static const u32 dvst_golden_registers[] = {
        0x690, 0x3fff3fff, 0x20c00033,
        0x918c, 0x0fff0fff, 0x00010006,
        0x91a8, 0x0fff0fff, 0x00010006,
@@ -333,8 +328,7 @@ static const u32 dvst_golden_registers[] =
        0x8974, 0xffffffff, 0x00000000
 };
 
-static const u32 scrapper_golden_registers[] =
-{
+static const u32 scrapper_golden_registers[] = {
        0x690, 0x3fff3fff, 0x20c00033,
        0x918c, 0x0fff0fff, 0x00010006,
        0x918c, 0x0fff0fff, 0x00010006,
@@ -624,7 +618,7 @@ static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
 int ni_mc_load_microcode(struct radeon_device *rdev)
 {
        const __be32 *fw_data;
-       u32 mem_type, running, blackout = 0;
+       u32 mem_type, running;
        u32 *io_mc_regs;
        int i, ucode_size, regs_size;
 
@@ -659,11 +653,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
        running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
        if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
-               if (running) {
-                       blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-                       WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
-               }
-
                /* reset the engine and set to writable */
                WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
@@ -689,9 +678,6 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
                                break;
                        udelay(1);
                }
-
-               if (running)
-                       WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
        }
 
        return 0;
@@ -754,7 +740,8 @@ int ni_init_microcode(struct radeon_device *rdev)
                rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
                mc_req_size = 0;
                break;
-       default: BUG();
+       default:
+               BUG();
        }
 
        DRM_INFO("Loading %s Microcode\n", chip_name);
index 3e1c1a392fb7b3b2e7c039d476e89adb793dc0de..e08559c44a5c482284fbb3264f61e34002750013 100644 (file)
@@ -3103,9 +3103,6 @@ static int ni_init_simplified_leakage_table(struct radeon_device *rdev,
        u32 smc_leakage, max_leakage = 0;
        u32 scaling_factor;
 
-       if (!leakage_table)
-               return -EINVAL;
-
        table_size = leakage_table->count;
 
        if (eg_pi->vddc_voltage_table.count != table_size)
index 74e30193690636d5fb5c2061dde610dfbbbd4a69..4e3e7303e035dce4fcfa2da80e139557e397cd79 100644 (file)
@@ -59,8 +59,7 @@ struct ni_mc_reg_table {
 
 #define NISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 2
 
-enum ni_dc_cac_level
-{
+enum ni_dc_cac_level {
        NISLANDS_DCCAC_LEVEL_0 = 0,
        NISLANDS_DCCAC_LEVEL_1,
        NISLANDS_DCCAC_LEVEL_2,
@@ -72,8 +71,7 @@ enum ni_dc_cac_level
        NISLANDS_DCCAC_MAX_LEVELS
 };
 
-struct ni_leakage_coeffients
-{
+struct ni_leakage_coeffients {
        u32 at;
        u32 bt;
        u32 av;
@@ -83,8 +81,7 @@ struct ni_leakage_coeffients
        u32 t_ref;
 };
 
-struct ni_cac_data
-{
+struct ni_cac_data {
        struct ni_leakage_coeffients leakage_coefficients;
        u32 i_leakage;
        s32 leakage_minimum_temperature;
@@ -100,8 +97,7 @@ struct ni_cac_data
        u8 lts_truncate_n;
 };
 
-struct ni_cac_weights
-{
+struct ni_cac_weights {
        u32 weight_tcp_sig0;
        u32 weight_tcp_sig1;
        u32 weight_ta_sig;
index 42f3bab0f9ee689845765c44b849538e8d53d121..097893c38915cc3534b755ecf115e63bc51427e1 100644 (file)
@@ -27,8 +27,7 @@
 
 #define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
 
-struct PP_NIslands_Dpm2PerfLevel
-{
+struct PP_NIslands_Dpm2PerfLevel {
     uint8_t     MaxPS;
     uint8_t     TgtAct;
     uint8_t     MaxPS_StepInc;
@@ -44,8 +43,7 @@ struct PP_NIslands_Dpm2PerfLevel
 
 typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
 
-struct PP_NIslands_DPM2Parameters
-{
+struct PP_NIslands_DPM2Parameters {
     uint32_t    TDPLimit;
     uint32_t    NearTDPLimit;
     uint32_t    SafePowerLimit;
@@ -53,8 +51,7 @@ struct PP_NIslands_DPM2Parameters
 };
 typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
 
-struct NISLANDS_SMC_SCLK_VALUE
-{
+struct NISLANDS_SMC_SCLK_VALUE {
     uint32_t        vCG_SPLL_FUNC_CNTL;
     uint32_t        vCG_SPLL_FUNC_CNTL_2;
     uint32_t        vCG_SPLL_FUNC_CNTL_3;
@@ -66,8 +63,7 @@ struct NISLANDS_SMC_SCLK_VALUE
 
 typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
 
-struct NISLANDS_SMC_MCLK_VALUE
-{
+struct NISLANDS_SMC_MCLK_VALUE {
     uint32_t        vMPLL_FUNC_CNTL;
     uint32_t        vMPLL_FUNC_CNTL_1;
     uint32_t        vMPLL_FUNC_CNTL_2;
@@ -84,8 +80,7 @@ struct NISLANDS_SMC_MCLK_VALUE
 
 typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
 
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
+struct NISLANDS_SMC_VOLTAGE_VALUE {
     uint16_t             value;
     uint8_t              index;
     uint8_t              padding;
@@ -93,8 +88,7 @@ struct NISLANDS_SMC_VOLTAGE_VALUE
 
 typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
 
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
+struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL {
     uint8_t                     arbValue;
     uint8_t                     ACIndex;
     uint8_t                     displayWatermark;
@@ -132,8 +126,7 @@ struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
 
 typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
 
-struct NISLANDS_SMC_SWSTATE
-{
+struct NISLANDS_SMC_SWSTATE {
        uint8_t                             flags;
        uint8_t                             levelCount;
        uint8_t                             padding2;
@@ -156,8 +149,7 @@ struct NISLANDS_SMC_SWSTATE_SINGLE {
 #define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
 #define NISLANDS_SMC_VOLTAGEMASK_MAX   4
 
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
+struct NISLANDS_SMC_VOLTAGEMASKTABLE {
     uint8_t  highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
     uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
 };
@@ -166,8 +158,7 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
 
 #define NISLANDS_MAX_NO_VREG_STEPS 32
 
-struct NISLANDS_SMC_STATETABLE
-{
+struct NISLANDS_SMC_STATETABLE {
        uint8_t                             thermalProtectType;
        uint8_t                             systemFlags;
        uint8_t                             maxVDDCIndexInPPTable;
@@ -203,8 +194,7 @@ typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
 #define SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
 #define SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES 4
 
-struct SMC_NISLANDS_MC_TPP_CAC_TABLE
-{
+struct SMC_NISLANDS_MC_TPP_CAC_TABLE {
     uint32_t    tpp[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
     uint32_t    cacValue[SMC_NISLANDS_MC_TPP_CAC_NUM_OF_ENTRIES];
 };
@@ -212,8 +202,7 @@ struct SMC_NISLANDS_MC_TPP_CAC_TABLE
 typedef struct SMC_NISLANDS_MC_TPP_CAC_TABLE SMC_NISLANDS_MC_TPP_CAC_TABLE;
 
 
-struct PP_NIslands_CACTABLES
-{
+struct PP_NIslands_CACTABLES {
     uint32_t                cac_bif_lut[SMC_NISLANDS_BIF_LUT_NUM_OF_ENTRIES];
     uint32_t                cac_lkge_lut[SMC_NISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_NISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES];
 
@@ -257,8 +246,7 @@ typedef struct PP_NIslands_CACTABLES PP_NIslands_CACTABLES;
 #define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
 #define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
 
-struct SMC_NIslands_MCRegisterAddress
-{
+struct SMC_NIslands_MCRegisterAddress {
     uint16_t s0;
     uint16_t s1;
 };
@@ -266,15 +254,13 @@ struct SMC_NIslands_MCRegisterAddress
 typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
 
 
-struct SMC_NIslands_MCRegisterSet
-{
+struct SMC_NIslands_MCRegisterSet {
     uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
 };
 
 typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
 
-struct SMC_NIslands_MCRegisters
-{
+struct SMC_NIslands_MCRegisters {
     uint8_t                             last;
     uint8_t                             reserved[3];
     SMC_NIslands_MCRegisterAddress      address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
@@ -283,8 +269,7 @@ struct SMC_NIslands_MCRegisters
 
 typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
 
-struct SMC_NIslands_MCArbDramTimingRegisterSet
-{
+struct SMC_NIslands_MCArbDramTimingRegisterSet {
     uint32_t mc_arb_dram_timing;
     uint32_t mc_arb_dram_timing2;
     uint8_t  mc_arb_rfsh_rate;
@@ -293,8 +278,7 @@ struct SMC_NIslands_MCArbDramTimingRegisterSet
 
 typedef struct SMC_NIslands_MCArbDramTimingRegisterSet SMC_NIslands_MCArbDramTimingRegisterSet;
 
-struct SMC_NIslands_MCArbDramTimingRegisters
-{
+struct SMC_NIslands_MCArbDramTimingRegisters {
     uint8_t                                     arb_current;
     uint8_t                                     reserved[3];
     SMC_NIslands_MCArbDramTimingRegisterSet     data[20];
@@ -302,8 +286,7 @@ struct SMC_NIslands_MCArbDramTimingRegisters
 
 typedef struct SMC_NIslands_MCArbDramTimingRegisters SMC_NIslands_MCArbDramTimingRegisters;
 
-struct SMC_NISLANDS_SPLL_DIV_TABLE
-{
+struct SMC_NISLANDS_SPLL_DIV_TABLE {
     uint32_t    freq[256];
     uint32_t    ss[256];
 };
index cfeca2694d5f919a083f8b773bdba91abf352164..86b8b770af19b3d378c19cc03e360574ceb3c981 100644 (file)
@@ -1327,7 +1327,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
            return -EINVAL;
        }
        track->num_arrays = c;
-       for (i = 0; i < (c - 1); i+=2, idx+=3) {
+       for (i = 0; i < (c - 1); i += 2, idx += 3) {
                r = radeon_cs_packet_next_reloc(p, &reloc, 0);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n",
index 9d341cff63ee770c723073ec7b67400466b40eba..d776f929d5c379e82edc33348b5c46e8f2f5034d 100644 (file)
 #      define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
 #      define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
 #      define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
-#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
+#       define R300_TX_MIN_FILTER_MASK   ((15 << 11) | (3 << 13))
 #      define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
 #      define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
 #      define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
index a17b95eec65fb81036c49c8ed9eeabadfc953f07..b5e97d95a19f0cd14d6ad21dc4bdda6bc0db09dd 100644 (file)
@@ -99,8 +99,7 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
 MODULE_FIRMWARE("radeon/SUMO2_me.bin");
 
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
        0,
        AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
 };
index 9d2bcb9551e612f96ca4f4cfb6c165229d7e68ba..64980a61d38a8e24094afd5f368d0c9b4f8e646c 100644 (file)
@@ -28,8 +28,7 @@
 #include "r600_dpm.h"
 #include "atom.h"
 
-const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
-{
+const u32 r600_utc[R600_PM_NUMBER_OF_TC] = {
        R600_UTC_DFLT_00,
        R600_UTC_DFLT_01,
        R600_UTC_DFLT_02,
@@ -47,8 +46,7 @@ const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
        R600_UTC_DFLT_14,
 };
 
-const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
-{
+const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = {
        R600_DTC_DFLT_00,
        R600_DTC_DFLT_01,
        R600_DTC_DFLT_02,
index 6e4d22ed2a0063df045561a1c1d6bacb7087c63e..5c2513c84c48af0c5f55cb872c7bb110effb45db 100644 (file)
@@ -119,8 +119,7 @@ enum r600_display_watermark {
        R600_DISPLAY_WATERMARK_HIGH = 1,
 };
 
-enum r600_display_gap
-{
+enum r600_display_gap {
     R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
     R600_PM_DISPLAY_GAP_VBLANK       = 1,
     R600_PM_DISPLAY_GAP_WATERMARK    = 2,
index 3d3d2109dfebc49028a12a5978514b8edaf6a918..3e5ff17e3cafb272dfc6326877470b26db43295a 100644 (file)
@@ -1355,14 +1355,12 @@ struct radeon_dpm_thermal {
        bool               high_to_low;
 };
 
-enum radeon_clk_action
-{
+enum radeon_clk_action {
        RADEON_SCLK_UP = 1,
        RADEON_SCLK_DOWN
 };
 
-struct radeon_blacklist_clocks
-{
+struct radeon_blacklist_clocks {
        u32 sclk;
        u32 mclk;
        enum radeon_clk_action action;
index 802b5af19261838442a87154447e9e6ab72bd421..b5a0109b2e2c51a07e0674fad0efdd46b356adea 100644 (file)
@@ -2400,10 +2400,10 @@ int radeon_asic_init(struct radeon_device *rdev)
        case CHIP_RS880:
                rdev->asic = &rs780_asic;
                /* 760G/780V/880V don't have UVD */
-               if ((rdev->pdev->device == 0x9616)||
-                   (rdev->pdev->device == 0x9611)||
-                   (rdev->pdev->device == 0x9613)||
-                   (rdev->pdev->device == 0x9711)||
+               if ((rdev->pdev->device == 0x9616) ||
+                   (rdev->pdev->device == 0x9611) ||
+                   (rdev->pdev->device == 0x9613) ||
+                   (rdev->pdev->device == 0x9711) ||
                    (rdev->pdev->device == 0x9713))
                        rdev->has_uvd = false;
                else
index 3596ea4a8b60f463a231682292bdc570e79b9c33..bb1f0a3371ab5de484a81ad040347c9a5a8d4e76 100644 (file)
@@ -2852,7 +2852,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
                args.v1.ucAction = clock_type;
                args.v1.ulClock = cpu_to_le32(clock);   /* 10 khz */
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                dividers->post_div = args.v1.ucPostDiv;
                dividers->fb_div = args.v1.ucFbDiv;
@@ -2866,7 +2866,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
                        args.v2.ucAction = clock_type;
                        args.v2.ulClock = cpu_to_le32(clock);   /* 10 khz */
 
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                        dividers->post_div = args.v2.ucPostDiv;
                        dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
@@ -2881,7 +2881,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
                        if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
                                args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
 
-                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                                dividers->post_div = args.v3.ucPostDiv;
                                dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -2901,7 +2901,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
                                if (strobe_mode)
                                        args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
 
-                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                                dividers->post_div = args.v5.ucPostDiv;
                                dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -2920,7 +2920,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
                /* fusion */
                args.v4.ulClock = cpu_to_le32(clock);   /* 10 khz */
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
                dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -2931,7 +2931,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
                args.v6_in.ulClock.ulComputeClockFlag = clock_type;
                args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);    /* 10 khz */
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
                dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -2972,7 +2972,7 @@ int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
                        if (strobe_mode)
                                args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
 
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                        mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
                        mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -3005,7 +3005,7 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
 
        args.ucEnable = enable;
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
@@ -3013,7 +3013,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
        GET_ENGINE_CLOCK_PS_ALLOCATION args;
        int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
        return le32_to_cpu(args.ulReturnEngineClock);
 }
 
@@ -3022,7 +3022,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
        GET_MEMORY_CLOCK_PS_ALLOCATION args;
        int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
        return le32_to_cpu(args.ulReturnMemoryClock);
 }
 
@@ -3034,7 +3034,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
 
        args.ulTargetEngineClock = cpu_to_le32(eng_clock);      /* 10 khz */
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void radeon_atom_set_memory_clock(struct radeon_device *rdev,
@@ -3048,7 +3048,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
 
        args.ulTargetMemoryClock = cpu_to_le32(mem_clock);      /* 10 khz */
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
@@ -3067,7 +3067,7 @@ void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
        if (mem_clock)
                args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void radeon_atom_update_memory_dll(struct radeon_device *rdev,
@@ -3078,7 +3078,7 @@ void radeon_atom_update_memory_dll(struct radeon_device *rdev,
 
        args = cpu_to_le32(mem_clock);  /* 10 khz */
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -3090,7 +3090,7 @@ void radeon_atom_set_ac_timing(struct radeon_device *rdev,
 
        args.ulTargetMemoryClock = cpu_to_le32(tmp);    /* 10 khz */
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 union set_voltage {
@@ -3134,7 +3134,7 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
                return;
        }
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 }
 
 int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
@@ -3155,7 +3155,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
                args.v2.ucVoltageMode = 0;
                args.v2.usVoltageLevel = 0;
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                *voltage = le16_to_cpu(args.v2.usVoltageLevel);
                break;
@@ -3164,7 +3164,7 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
                args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
                args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                *voltage = le16_to_cpu(args.v3.usVoltageLevel);
                break;
@@ -3200,7 +3200,7 @@ int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
                args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
                args.v3.usVoltageLevel = 0;
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
                break;
@@ -3327,7 +3327,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
        args.in.ulSCLKFreq =
                cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
 
-       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
        *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
 
@@ -3353,7 +3353,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
                args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
                args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                *gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
 
@@ -3361,7 +3361,7 @@ int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
                args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
                args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
 
-               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+               atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
 
                *gpio_value = le32_to_cpu(*(u32 *)&args.v2);
                break;
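
The hunks above all thread sizeof(args) through to atom_execute_table(), so the Atom interpreter knows the size of the caller's parameter space (PS) and can bounds-check accesses instead of trusting table indices (the "Atom interpretor PS, WS bounds checking" item). A minimal userspace sketch of the pattern, using hypothetical names (ps_ctx, ps_read) rather than the driver's actual internals:

#include <inttypes.h>
#include <stdio.h>

/* Execution context carrying the caller's parameter space (PS) and its
 * size -- mirroring how the hunks above pass sizeof(args) along. */
struct ps_ctx {
	const uint32_t *ps;   /* parameter-space words */
	size_t ps_size;       /* bytes, from the caller's sizeof(args) */
};

/* Bounds-checked PS read: an out-of-range index is rejected instead of
 * reading past the caller's argument buffer. */
static uint32_t ps_read(const struct ps_ctx *ctx, size_t idx)
{
	if (idx >= ctx->ps_size / sizeof(uint32_t)) {
		fprintf(stderr, "PS read out of bounds: %zu\n", idx);
		return 0;
	}
	return ctx->ps[idx];
}

int main(void)
{
	uint32_t args[2] = { 0x1234, 0x5678 };
	struct ps_ctx ctx = { args, sizeof(args) };

	printf("%" PRIx32 "\n", ps_read(&ctx, 1)); /* in range: 5678 */
	printf("%" PRIx32 "\n", ps_read(&ctx, 5)); /* rejected -> 0 */
	return 0;
}

The point is that the buffer size travels with the buffer, so a malformed BIOS table index fails the check rather than reading or writing past args.
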
index 595354e3ce0b2b07ee42e183f4023bb6be23741d..f557535c1d7b3e5f5351ccf4509c2ad1371a5948 100644 (file)
@@ -61,19 +61,23 @@ struct atpx_mux {
        u16 mux;
 } __packed;
 
-bool radeon_has_atpx(void) {
+bool radeon_has_atpx(void)
+{
        return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
+bool radeon_has_atpx_dgpu_power_cntl(void)
+{
        return radeon_atpx_priv.atpx.functions.power_cntl;
 }
 
-bool radeon_is_atpx_hybrid(void) {
+bool radeon_is_atpx_hybrid(void)
+{
        return radeon_atpx_priv.atpx.is_hybrid;
 }
 
-bool radeon_atpx_dgpu_req_power_for_displays(void) {
+bool radeon_atpx_dgpu_req_power_for_displays(void)
+{
        return radeon_atpx_priv.atpx.dgpu_req_power_for_displays;
 }
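
Unlike the struct, enum, and array hunks elsewhere in this series, this hunk moves the opening brace onto its own line: kernel coding style opens function bodies on the next line but keeps the brace on the declaration line for aggregate types and initializers, and binds the pointer '*' to the name rather than the type. A small sketch of the three forms, with illustrative names only:

#include <stdint.h>

/* struct: brace stays on the declaration line */
struct sample_regs {
	uint32_t ctrl;
	uint32_t status;
};

/* initializer: brace stays on the declaration line */
static const uint32_t sample_offsets[2] = {
	0x00,
	0x10,
};

/* function: brace on its own line; '*' binds to the name */
static uint32_t sample_read(const struct sample_regs *regs)
{
	return regs->status;
}
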
 
index 91b58fbc2be77af33ae1f2a097e8188af8ca7095..74753bb26d3399fe2aba3c65b521581627cf1560 100644 (file)
 
 void dce6_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin,
                u8 enable_mask);
-struct r600_audio_pin* r600_audio_get_pin(struct radeon_device *rdev);
-struct r600_audio_pin* dce6_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
 static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
 
-static const u32 pin_offsets[7] =
-{
+static const u32 pin_offsets[7] = {
        (0x5e00 - 0x5e00),
        (0x5e18 - 0x5e00),
        (0x5e30 - 0x5e00),
@@ -361,7 +360,7 @@ static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
                radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
 }
 
-struct r600_audio_pin* radeon_audio_get_pin(struct drm_encoder *encoder)
+struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -528,7 +527,7 @@ static void radeon_audio_calc_cts(unsigned int clock, int *CTS, int *N, int freq
                *N, *CTS, freq);
 }
 
-static const struct radeon_hdmi_acr* radeon_audio_acr(unsigned int clock)
+static const struct radeon_hdmi_acr *radeon_audio_acr(unsigned int clock)
 {
        static struct radeon_hdmi_acr res;
        u8 i;
index dacaaa007051e6fe81c7d8c55e41fc7d9d871e79..a073dadd063808b93078781772901ed95abd6575 100644 (file)
@@ -34,8 +34,7 @@ struct cea_sad;
 #define WREG32_ENDPOINT(block, reg, v) \
        radeon_audio_endpoint_wreg(rdev, (block), (reg), (v))
 
-struct radeon_audio_basic_funcs
-{
+struct radeon_audio_basic_funcs {
        u32  (*endpoint_rreg)(struct radeon_device *rdev, u32 offset, u32 reg);
        void (*endpoint_wreg)(struct radeon_device *rdev,
                u32 offset, u32 reg, u32 v);
@@ -43,8 +42,7 @@ struct radeon_audio_basic_funcs
                struct r600_audio_pin *pin, u8 enable_mask);
 };
 
-struct radeon_audio_funcs
-{
+struct radeon_audio_funcs {
        void (*select_pin)(struct drm_encoder *encoder);
        struct r600_audio_pin* (*get_pin)(struct radeon_device *rdev);
        void (*write_latency_fields)(struct drm_encoder *encoder,
index 59c4db13d90ae323d896d472e8400007e0b1c011..546381a5c918d60de85016cea588ae02eb790f86 100644 (file)
@@ -603,8 +603,7 @@ struct atom_memory_info {
 
 #define MAX_AC_TIMING_ENTRIES 16
 
-struct atom_memory_clock_range_table
-{
+struct atom_memory_clock_range_table {
        u8 num_entries;
        u8 rsv[3];
        u32 mclk[MAX_AC_TIMING_ENTRIES];
@@ -632,14 +631,12 @@ struct atom_mc_reg_table {
 
 #define MAX_VOLTAGE_ENTRIES 32
 
-struct atom_voltage_table_entry
-{
+struct atom_voltage_table_entry {
        u16 value;
        u32 smio_low;
 };
 
-struct atom_voltage_table
-{
+struct atom_voltage_table {
        u32 count;
        u32 mask_low;
        u32 phase_delay;
index b73fd9ab02522a7fa5b2310bffa8f6c80893289f..4482c8c5f5cedfc1884396b94f217089d72f34e7 100644 (file)
@@ -587,7 +587,7 @@ static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
        int err;
        int value;
 
-       if(!rdev->asic->dpm.fan_ctrl_set_mode)
+       if (!rdev->asic->dpm.fan_ctrl_set_mode)
                return -EINVAL;
 
        err = kstrtoint(buf, 10, &value);
@@ -789,7 +789,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
                return 0;
 
        /* Skip vddc attribute if get_current_vddc is not implemented */
-       if(attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
+       if (attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
                !rdev->asic->dpm.get_current_vddc)
                return 0;
 
index 922a29e58880270f1edc9325f9e805bd9076a862..d7f552d441ab79cb6b375171b6161014ed8a3e0f 100644 (file)
@@ -86,7 +86,7 @@ int rs400_gart_init(struct radeon_device *rdev)
                return 0;
        }
        /* Check gart size */
-       switch(rdev->mc.gtt_size / (1024 * 1024)) {
+       switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
        case 64:
        case 128:
@@ -116,7 +116,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
        /* Check gart size */
-       switch(rdev->mc.gtt_size / (1024 * 1024)) {
+       switch (rdev->mc.gtt_size / (1024 * 1024)) {
        case 32:
                size_reg = RS480_VA_SIZE_32MB;
                break;
index 8cf87a0a2b2a00767b30b216633d2777c63a94d9..5c162778899b0997704cf67cd95c6498976811a9 100644 (file)
@@ -54,8 +54,7 @@
 static void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
        0,
        AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
 };
index 76260fdfbaa725c35c9ec5abc67b37bd029a9f91..79709d26d9831300b47890cff02b2f91a7ff1cb1 100644 (file)
@@ -42,8 +42,7 @@
 static void rv515_gpu_init(struct radeon_device *rdev);
 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
-static const u32 crtc_offsets[2] =
-{
+static const u32 crtc_offsets[2] = {
        0,
        AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
 };
index 8035d53ebea6949a85808c2be0c2ad9ab7975712..020c0dc8361d581581082cc09f6e79aa7f8e3c87 100644 (file)
@@ -28,8 +28,7 @@
 #include "r600_dpm.h"
 
 /* Represents a single SCLK step. */
-struct rv6xx_sclk_stepping
-{
+struct rv6xx_sclk_stepping {
     u32 vco_frequency;
     u32 post_divider;
 };
index ef2f1a048cfeddf4ece2450abe7f112139682d93..e3e1f6833f121709cf15f4557a8ea4c319c07c00 100644 (file)
@@ -1010,7 +1010,7 @@ int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
        struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 
        if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
-            (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) {
+            (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low)) {
                voltage->index = MVDD_LOW_INDEX;
                voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
        } else {
@@ -1260,7 +1260,7 @@ static int rv770_construct_vddc_table(struct radeon_device *rdev)
                pi->vddc_mask_low = gpio_mask;
                if (i > 0) {
                        if ((pi->vddc_table[i].low_smio !=
-                            pi->vddc_table[i - 1].low_smio ) ||
+                            pi->vddc_table[i - 1].low_smio) ||
                             (pi->vddc_table[i].high_smio !=
                              pi->vddc_table[i - 1].high_smio))
                                vddc_index++;
index 3b2c963c4880048646d6b80bf80ade153f505db3..d8e8f70135f2a4dfebacb259229baa3f1d96b633 100644 (file)
@@ -31,8 +31,7 @@
 
 #define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE    3
 
-struct RV770_SMC_SCLK_VALUE
-{
+struct RV770_SMC_SCLK_VALUE {
     uint32_t        vCG_SPLL_FUNC_CNTL;
     uint32_t        vCG_SPLL_FUNC_CNTL_2;
     uint32_t        vCG_SPLL_FUNC_CNTL_3;
@@ -43,8 +42,7 @@ struct RV770_SMC_SCLK_VALUE
 
 typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
 
-struct RV770_SMC_MCLK_VALUE
-{
+struct RV770_SMC_MCLK_VALUE {
     uint32_t        vMPLL_AD_FUNC_CNTL;
     uint32_t        vMPLL_AD_FUNC_CNTL_2;
     uint32_t        vMPLL_DQ_FUNC_CNTL;
@@ -59,8 +57,7 @@ struct RV770_SMC_MCLK_VALUE
 typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
 
 
-struct RV730_SMC_MCLK_VALUE
-{
+struct RV730_SMC_MCLK_VALUE {
     uint32_t        vMCLK_PWRMGT_CNTL;
     uint32_t        vDLL_CNTL;
     uint32_t        vMPLL_FUNC_CNTL;
@@ -73,8 +70,7 @@ struct RV730_SMC_MCLK_VALUE
 
 typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
 
-struct RV770_SMC_VOLTAGE_VALUE
-{
+struct RV770_SMC_VOLTAGE_VALUE {
     uint16_t             value;
     uint8_t              index;
     uint8_t              padding;
@@ -82,16 +78,14 @@ struct RV770_SMC_VOLTAGE_VALUE
 
 typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
 
-union RV7XX_SMC_MCLK_VALUE
-{
+union RV7XX_SMC_MCLK_VALUE {
     RV770_SMC_MCLK_VALUE    mclk770;
     RV730_SMC_MCLK_VALUE    mclk730;
 };
 
 typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
 
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
+struct RV770_SMC_HW_PERFORMANCE_LEVEL {
     uint8_t                 arbValue;
     union{
         uint8_t             seqValue;
@@ -126,8 +120,7 @@ struct RV770_SMC_HW_PERFORMANCE_LEVEL
 
 typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
 
-struct RV770_SMC_SWSTATE
-{
+struct RV770_SMC_SWSTATE {
     uint8_t           flags;
     uint8_t           padding1;
     uint8_t           padding2;
@@ -142,8 +135,7 @@ typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
 #define RV770_SMC_VOLTAGEMASK_VDDCI 2
 #define RV770_SMC_VOLTAGEMASK_MAX  4
 
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
+struct RV770_SMC_VOLTAGEMASKTABLE {
     uint8_t  highMask[RV770_SMC_VOLTAGEMASK_MAX];
     uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
 };
@@ -152,8 +144,7 @@ typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
 
 #define MAX_NO_VREG_STEPS 32
 
-struct RV770_SMC_STATETABLE
-{
+struct RV770_SMC_STATETABLE {
     uint8_t             thermalProtectType;
     uint8_t             systemFlags;
     uint8_t             maxVDDCIndexInPPTable;
index 85e9cba49cecb2de2a4f6ea214716c0c3947ecf5..93f197d96d8f7ea084db4d18fb279ff27a40d6b7 100644 (file)
@@ -138,8 +138,7 @@ static void si_fini_pg(struct radeon_device *rdev);
 static void si_fini_cg(struct radeon_device *rdev);
 static void si_rlc_stop(struct radeon_device *rdev);
 
-static const u32 crtc_offsets[] =
-{
+static const u32 crtc_offsets[] = {
        EVERGREEN_CRTC0_REGISTER_OFFSET,
        EVERGREEN_CRTC1_REGISTER_OFFSET,
        EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -148,8 +147,7 @@ static const u32 crtc_offsets[] =
        EVERGREEN_CRTC5_REGISTER_OFFSET
 };
 
-static const u32 si_disp_int_status[] =
-{
+static const u32 si_disp_int_status[] = {
        DISP_INTERRUPT_STATUS,
        DISP_INTERRUPT_STATUS_CONTINUE,
        DISP_INTERRUPT_STATUS_CONTINUE2,
@@ -162,8 +160,7 @@ static const u32 si_disp_int_status[] =
 #define DC_HPDx_INT_CONTROL(x)    (DC_HPD1_INT_CONTROL + (x * 0xc))
 #define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS  + (x * 0xc))
 
-static const u32 verde_rlc_save_restore_register_list[] =
-{
+static const u32 verde_rlc_save_restore_register_list[] = {
        (0x8000 << 16) | (0x98f4 >> 2),
        0x00000000,
        (0x8040 << 16) | (0x98f4 >> 2),
@@ -384,8 +381,7 @@ static const u32 verde_rlc_save_restore_register_list[] =
        0x00000000
 };
 
-static const u32 tahiti_golden_rlc_registers[] =
-{
+static const u32 tahiti_golden_rlc_registers[] = {
        0xc424, 0xffffffff, 0x00601005,
        0xc47c, 0xffffffff, 0x10104040,
        0xc488, 0xffffffff, 0x0100000a,
@@ -394,8 +390,7 @@ static const u32 tahiti_golden_rlc_registers[] =
        0xf4a8, 0xffffffff, 0x00000000
 };
 
-static const u32 tahiti_golden_registers[] =
-{
+static const u32 tahiti_golden_registers[] = {
        0x9a10, 0x00010000, 0x00018208,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
@@ -429,13 +424,11 @@ static const u32 tahiti_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
-static const u32 tahiti_golden_registers2[] =
-{
+static const u32 tahiti_golden_registers2[] = {
        0xc64, 0x00000001, 0x00000001
 };
 
-static const u32 pitcairn_golden_rlc_registers[] =
-{
+static const u32 pitcairn_golden_rlc_registers[] = {
        0xc424, 0xffffffff, 0x00601004,
        0xc47c, 0xffffffff, 0x10102020,
        0xc488, 0xffffffff, 0x01000020,
@@ -443,8 +436,7 @@ static const u32 pitcairn_golden_rlc_registers[] =
        0xc30c, 0xffffffff, 0x800000a4
 };
 
-static const u32 pitcairn_golden_registers[] =
-{
+static const u32 pitcairn_golden_registers[] = {
        0x9a10, 0x00010000, 0x00018208,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
@@ -474,8 +466,7 @@ static const u32 pitcairn_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
-static const u32 verde_golden_rlc_registers[] =
-{
+static const u32 verde_golden_rlc_registers[] = {
        0xc424, 0xffffffff, 0x033f1005,
        0xc47c, 0xffffffff, 0x10808020,
        0xc488, 0xffffffff, 0x00800008,
@@ -483,8 +474,7 @@ static const u32 verde_golden_rlc_registers[] =
        0xc30c, 0xffffffff, 0x80010014
 };
 
-static const u32 verde_golden_registers[] =
-{
+static const u32 verde_golden_registers[] = {
        0x9a10, 0x00010000, 0x00018208,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
@@ -539,8 +529,7 @@ static const u32 verde_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
-static const u32 oland_golden_rlc_registers[] =
-{
+static const u32 oland_golden_rlc_registers[] = {
        0xc424, 0xffffffff, 0x00601005,
        0xc47c, 0xffffffff, 0x10104040,
        0xc488, 0xffffffff, 0x0100000a,
@@ -548,8 +537,7 @@ static const u32 oland_golden_rlc_registers[] =
        0xc30c, 0xffffffff, 0x800000f4
 };
 
-static const u32 oland_golden_registers[] =
-{
+static const u32 oland_golden_registers[] = {
        0x9a10, 0x00010000, 0x00018208,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
@@ -579,8 +567,7 @@ static const u32 oland_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
-static const u32 hainan_golden_registers[] =
-{
+static const u32 hainan_golden_registers[] = {
        0x9a10, 0x00010000, 0x00018208,
        0x9830, 0xffffffff, 0x00000000,
        0x9834, 0xf00fffff, 0x00000400,
@@ -608,13 +595,11 @@ static const u32 hainan_golden_registers[] =
        0x15c0, 0x000c0fc0, 0x000c0400
 };
 
-static const u32 hainan_golden_registers2[] =
-{
+static const u32 hainan_golden_registers2[] = {
        0x98f8, 0xffffffff, 0x02010001
 };
 
-static const u32 tahiti_mgcg_cgcg_init[] =
-{
+static const u32 tahiti_mgcg_cgcg_init[] = {
        0xc400, 0xffffffff, 0xfffffffc,
        0x802c, 0xffffffff, 0xe0000000,
        0x9a60, 0xffffffff, 0x00000100,
@@ -743,8 +728,7 @@ static const u32 tahiti_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
-static const u32 pitcairn_mgcg_cgcg_init[] =
-{
+static const u32 pitcairn_mgcg_cgcg_init[] = {
        0xc400, 0xffffffff, 0xfffffffc,
        0x802c, 0xffffffff, 0xe0000000,
        0x9a60, 0xffffffff, 0x00000100,
@@ -841,8 +825,7 @@ static const u32 pitcairn_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
-static const u32 verde_mgcg_cgcg_init[] =
-{
+static const u32 verde_mgcg_cgcg_init[] = {
        0xc400, 0xffffffff, 0xfffffffc,
        0x802c, 0xffffffff, 0xe0000000,
        0x9a60, 0xffffffff, 0x00000100,
@@ -941,8 +924,7 @@ static const u32 verde_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
-static const u32 oland_mgcg_cgcg_init[] =
-{
+static const u32 oland_mgcg_cgcg_init[] = {
        0xc400, 0xffffffff, 0xfffffffc,
        0x802c, 0xffffffff, 0xe0000000,
        0x9a60, 0xffffffff, 0x00000100,
@@ -1021,8 +1003,7 @@ static const u32 oland_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
-static const u32 hainan_mgcg_cgcg_init[] =
-{
+static const u32 hainan_mgcg_cgcg_init[] = {
        0xc400, 0xffffffff, 0xfffffffc,
        0x802c, 0xffffffff, 0xe0000000,
        0x9a60, 0xffffffff, 0x00000100,
@@ -1098,8 +1079,7 @@ static const u32 hainan_mgcg_cgcg_init[] =
        0xd8c0, 0xfffffff0, 0x00000100
 };
 
-static u32 verde_pg_init[] =
-{
+static u32 verde_pg_init[] = {
        0x353c, 0xffffffff, 0x40000,
        0x3538, 0xffffffff, 0x200010ff,
        0x353c, 0xffffffff, 0x0,
@@ -1768,7 +1748,8 @@ static int si_init_microcode(struct radeon_device *rdev)
                mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
                smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
                break;
-       default: BUG();
+       default:
+               BUG();
        }
 
        /* this memory configuration requires special firmware */
index fbf968e3f6d789c5a00ad8d98793464babb23f2d..9deb91970d4df23f7de74d9b75b777275f4a4fff 100644 (file)
@@ -46,8 +46,7 @@
 
 #define SCLK_MIN_DEEPSLEEP_FREQ     1350
 
-static const struct si_cac_config_reg cac_weights_tahiti[] =
-{
+static const struct si_cac_config_reg cac_weights_tahiti[] = {
        { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
@@ -111,8 +110,7 @@ static const struct si_cac_config_reg cac_weights_tahiti[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg lcac_tahiti[] =
-{
+static const struct si_cac_config_reg lcac_tahiti[] = {
        { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
        { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
        { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
@@ -203,13 +201,11 @@ static const struct si_cac_config_reg lcac_tahiti[] =
 
 };
 
-static const struct si_cac_config_reg cac_override_tahiti[] =
-{
+static const struct si_cac_config_reg cac_override_tahiti[] = {
        { 0xFFFFFFFF }
 };
 
-static const struct si_powertune_data powertune_data_tahiti =
-{
+static const struct si_powertune_data powertune_data_tahiti = {
        ((1 << 16) | 27027),
        6,
        0,
@@ -239,8 +235,7 @@ static const struct si_powertune_data powertune_data_tahiti =
        true
 };
 
-static const struct si_dte_data dte_data_tahiti =
-{
+static const struct si_dte_data dte_data_tahiti = {
        { 1159409, 0, 0, 0, 0 },
        { 777, 0, 0, 0, 0 },
        2,
@@ -257,8 +252,7 @@ static const struct si_dte_data dte_data_tahiti =
        false
 };
 
-static const struct si_dte_data dte_data_tahiti_pro =
-{
+static const struct si_dte_data dte_data_tahiti_pro = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -275,8 +269,7 @@ static const struct si_dte_data dte_data_tahiti_pro =
        true
 };
 
-static const struct si_dte_data dte_data_new_zealand =
-{
+static const struct si_dte_data dte_data_new_zealand = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
        { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
        0x5,
@@ -293,8 +286,7 @@ static const struct si_dte_data dte_data_new_zealand =
        true
 };
 
-static const struct si_dte_data dte_data_aruba_pro =
-{
+static const struct si_dte_data dte_data_aruba_pro = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -311,8 +303,7 @@ static const struct si_dte_data dte_data_aruba_pro =
        true
 };
 
-static const struct si_dte_data dte_data_malta =
-{
+static const struct si_dte_data dte_data_malta = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -329,8 +320,7 @@ static const struct si_dte_data dte_data_malta =
        true
 };
 
-static struct si_cac_config_reg cac_weights_pitcairn[] =
-{
+static struct si_cac_config_reg cac_weights_pitcairn[] = {
        { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
@@ -394,8 +384,7 @@ static struct si_cac_config_reg cac_weights_pitcairn[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg lcac_pitcairn[] =
-{
+static const struct si_cac_config_reg lcac_pitcairn[] = {
        { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
        { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
        { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -485,13 +474,11 @@ static const struct si_cac_config_reg lcac_pitcairn[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_override_pitcairn[] =
-{
+static const struct si_cac_config_reg cac_override_pitcairn[] = {
        { 0xFFFFFFFF }
 };
 
-static const struct si_powertune_data powertune_data_pitcairn =
-{
+static const struct si_powertune_data powertune_data_pitcairn = {
        ((1 << 16) | 27027),
        5,
        0,
@@ -521,8 +508,7 @@ static const struct si_powertune_data powertune_data_pitcairn =
        true
 };
 
-static const struct si_dte_data dte_data_pitcairn =
-{
+static const struct si_dte_data dte_data_pitcairn = {
        { 0, 0, 0, 0, 0 },
        { 0, 0, 0, 0, 0 },
        0,
@@ -539,8 +525,7 @@ static const struct si_dte_data dte_data_pitcairn =
        false
 };
 
-static const struct si_dte_data dte_data_curacao_xt =
-{
+static const struct si_dte_data dte_data_curacao_xt = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -557,8 +542,7 @@ static const struct si_dte_data dte_data_curacao_xt =
        true
 };
 
-static const struct si_dte_data dte_data_curacao_pro =
-{
+static const struct si_dte_data dte_data_curacao_pro = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -575,8 +559,7 @@ static const struct si_dte_data dte_data_curacao_pro =
        true
 };
 
-static const struct si_dte_data dte_data_neptune_xt =
-{
+static const struct si_dte_data dte_data_neptune_xt = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -593,8 +576,7 @@ static const struct si_dte_data dte_data_neptune_xt =
        true
 };
 
-static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_chelsea_pro[] = {
        { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -658,8 +640,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_chelsea_xt[] = {
        { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -723,8 +704,7 @@ static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_heathrow[] =
-{
+static const struct si_cac_config_reg cac_weights_heathrow[] = {
        { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -788,8 +768,7 @@ static const struct si_cac_config_reg cac_weights_heathrow[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = {
        { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -853,8 +832,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_cape_verde[] =
-{
+static const struct si_cac_config_reg cac_weights_cape_verde[] = {
        { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -918,8 +896,7 @@ static const struct si_cac_config_reg cac_weights_cape_verde[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg lcac_cape_verde[] =
-{
+static const struct si_cac_config_reg lcac_cape_verde[] = {
        { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
        { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
        { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -977,13 +954,11 @@ static const struct si_cac_config_reg lcac_cape_verde[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_override_cape_verde[] =
-{
+static const struct si_cac_config_reg cac_override_cape_verde[] = {
        { 0xFFFFFFFF }
 };
 
-static const struct si_powertune_data powertune_data_cape_verde =
-{
+static const struct si_powertune_data powertune_data_cape_verde = {
        ((1 << 16) | 0x6993),
        5,
        0,
@@ -1013,8 +988,7 @@ static const struct si_powertune_data powertune_data_cape_verde =
        true
 };
 
-static const struct si_dte_data dte_data_cape_verde =
-{
+static const struct si_dte_data dte_data_cape_verde = {
        { 0, 0, 0, 0, 0 },
        { 0, 0, 0, 0, 0 },
        0,
@@ -1031,8 +1005,7 @@ static const struct si_dte_data dte_data_cape_verde =
        false
 };
 
-static const struct si_dte_data dte_data_venus_xtx =
-{
+static const struct si_dte_data dte_data_venus_xtx = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
        5,
@@ -1049,8 +1022,7 @@ static const struct si_dte_data dte_data_venus_xtx =
        true
 };
 
-static const struct si_dte_data dte_data_venus_xt =
-{
+static const struct si_dte_data dte_data_venus_xt = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
        5,
@@ -1067,8 +1039,7 @@ static const struct si_dte_data dte_data_venus_xt =
        true
 };
 
-static const struct si_dte_data dte_data_venus_pro =
-{
+static const struct si_dte_data dte_data_venus_pro = {
        {  0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
        5,
@@ -1085,8 +1056,7 @@ static const struct si_dte_data dte_data_venus_pro =
        true
 };
 
-static struct si_cac_config_reg cac_weights_oland[] =
-{
+static struct si_cac_config_reg cac_weights_oland[] = {
        { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
@@ -1150,8 +1120,7 @@ static struct si_cac_config_reg cac_weights_oland[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_mars_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_mars_pro[] = {
        { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1215,8 +1184,7 @@ static const struct si_cac_config_reg cac_weights_mars_pro[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_mars_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_mars_xt[] = {
        { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1280,8 +1248,7 @@ static const struct si_cac_config_reg cac_weights_mars_xt[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_oland_pro[] =
-{
+static const struct si_cac_config_reg cac_weights_oland_pro[] = {
        { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1345,8 +1312,7 @@ static const struct si_cac_config_reg cac_weights_oland_pro[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_weights_oland_xt[] =
-{
+static const struct si_cac_config_reg cac_weights_oland_xt[] = {
        { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
@@ -1410,8 +1376,7 @@ static const struct si_cac_config_reg cac_weights_oland_xt[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg lcac_oland[] =
-{
+static const struct si_cac_config_reg lcac_oland[] = {
        { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
        { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
        { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -1457,8 +1422,7 @@ static const struct si_cac_config_reg lcac_oland[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg lcac_mars_pro[] =
-{
+static const struct si_cac_config_reg lcac_mars_pro[] = {
        { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
        { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
        { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
@@ -1504,13 +1468,11 @@ static const struct si_cac_config_reg lcac_mars_pro[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_cac_config_reg cac_override_oland[] =
-{
+static const struct si_cac_config_reg cac_override_oland[] = {
        { 0xFFFFFFFF }
 };
 
-static const struct si_powertune_data powertune_data_oland =
-{
+static const struct si_powertune_data powertune_data_oland = {
        ((1 << 16) | 0x6993),
        5,
        0,
@@ -1540,8 +1502,7 @@ static const struct si_powertune_data powertune_data_oland =
        true
 };
 
-static const struct si_powertune_data powertune_data_mars_pro =
-{
+static const struct si_powertune_data powertune_data_mars_pro = {
        ((1 << 16) | 0x6993),
        5,
        0,
@@ -1571,8 +1532,7 @@ static const struct si_powertune_data powertune_data_mars_pro =
        true
 };
 
-static const struct si_dte_data dte_data_oland =
-{
+static const struct si_dte_data dte_data_oland = {
        { 0, 0, 0, 0, 0 },
        { 0, 0, 0, 0, 0 },
        0,
@@ -1589,8 +1549,7 @@ static const struct si_dte_data dte_data_oland =
        false
 };
 
-static const struct si_dte_data dte_data_mars_pro =
-{
+static const struct si_dte_data dte_data_mars_pro = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -1607,8 +1566,7 @@ static const struct si_dte_data dte_data_mars_pro =
        true
 };
 
-static const struct si_dte_data dte_data_sun_xt =
-{
+static const struct si_dte_data dte_data_sun_xt = {
        { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
        { 0x0, 0x0, 0x0, 0x0, 0x0 },
        5,
@@ -1626,8 +1584,7 @@ static const struct si_dte_data dte_data_sun_xt =
 };
 
 
-static const struct si_cac_config_reg cac_weights_hainan[] =
-{
+static const struct si_cac_config_reg cac_weights_hainan[] = {
        { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
        { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
        { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
@@ -1691,8 +1648,7 @@ static const struct si_cac_config_reg cac_weights_hainan[] =
        { 0xFFFFFFFF }
 };
 
-static const struct si_powertune_data powertune_data_hainan =
-{
+static const struct si_powertune_data powertune_data_hainan = {
        ((1 << 16) | 0x6993),
        5,
        0,
index aa857906ef93d18faf65be06c02fe392f69b40a6..4887edebd3482f80d1708026b673e11aeb62f1ae 100644 (file)
 #include "ni_dpm.h"
 #include "sislands_smc.h"
 
-enum si_cac_config_reg_type
-{
+enum si_cac_config_reg_type {
        SISLANDS_CACCONFIG_MMR = 0,
        SISLANDS_CACCONFIG_CGIND,
        SISLANDS_CACCONFIG_MAX
 };
 
-struct si_cac_config_reg
-{
+struct si_cac_config_reg {
        u32 offset;
        u32 mask;
        u32 shift;
@@ -42,8 +40,7 @@ struct si_cac_config_reg
        enum si_cac_config_reg_type type;
 };
 
-struct si_powertune_data
-{
+struct si_powertune_data {
        u32 cac_window;
        u32 l2_lta_window_size_default;
        u8 lts_truncate_default;
@@ -56,8 +53,7 @@ struct si_powertune_data
        bool enable_powertune_by_default;
 };
 
-struct si_dyn_powertune_data
-{
+struct si_dyn_powertune_data {
        u32 cac_leakage;
        s32 leakage_minimum_temperature;
        u32 wintime;
@@ -68,8 +64,7 @@ struct si_dyn_powertune_data
        bool disable_uvd_powertune;
 };
 
-struct si_dte_data
-{
+struct si_dte_data {
        u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
        u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES];
        u32 k;
@@ -122,8 +117,7 @@ struct si_mc_reg_table {
 #define SISLANDS_MCREGISTERTABLE_ULV_SLOT                   2
 #define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT     3
 
-struct si_leakage_voltage_entry
-{
+struct si_leakage_voltage_entry {
        u16 voltage;
        u16 leakage_index;
 };
@@ -131,8 +125,7 @@ struct si_leakage_voltage_entry
 #define SISLANDS_LEAKAGE_INDEX0     0xff01
 #define SISLANDS_MAX_LEAKAGE_COUNT  4
 
-struct si_leakage_voltage
-{
+struct si_leakage_voltage {
        u16 count;
        struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
 };
index 75a380a15292103741cee61a61ab758149141758..985d720dbc0d99b3d3b1990ee640cdcd0fd9ddb8 100644 (file)
@@ -82,8 +82,7 @@
 #define SCRATCH_B_CURR_SAMU_INDEX_MASK  (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
 
 
-struct SMU7_PIDController
-{
+struct SMU7_PIDController {
     uint32_t Ki;
     int32_t LFWindupUL;
     int32_t LFWindupLL;
@@ -117,8 +116,7 @@ typedef struct SMU7_PIDController SMU7_PIDController;
 #define SMU7_VCE_MCLK_HANDSHAKE_DISABLE                  0x00010000
 #define SMU7_VCE_SCLK_HANDSHAKE_DISABLE                  0x00020000
 
-struct SMU7_Firmware_Header
-{
+struct SMU7_Firmware_Header {
     uint32_t Digest[5];
     uint32_t Version;
     uint32_t HeaderSize;
index 0b0b404ff0916a9e72f241400c113b63b43d4a0f..1f63cbbd65155f738e3b2a6a628230aed3399735 100644 (file)
@@ -35,8 +35,7 @@
 #define SMU7_NUM_GPU_TES 1
 #define SMU7_NUM_NON_TES 2
 
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
     uint32_t        RefClockFrequency;
     uint32_t        PmTimerP;
     uint32_t        FeatureEnables;
@@ -89,8 +88,7 @@ struct SMU7_SoftRegisters
 
 typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
 
-struct SMU7_Discrete_VoltageLevel
-{
+struct SMU7_Discrete_VoltageLevel {
     uint16_t    Voltage;
     uint16_t    StdVoltageHiSidd;
     uint16_t    StdVoltageLoSidd;
@@ -100,8 +98,7 @@ struct SMU7_Discrete_VoltageLevel
 
 typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
 
-struct SMU7_Discrete_GraphicsLevel
-{
+struct SMU7_Discrete_GraphicsLevel {
     uint32_t    Flags;
     uint32_t    MinVddc;
     uint32_t    MinVddcPhases;
@@ -131,8 +128,7 @@ struct SMU7_Discrete_GraphicsLevel
 
 typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
 
-struct SMU7_Discrete_ACPILevel
-{
+struct SMU7_Discrete_ACPILevel {
     uint32_t    Flags;
     uint32_t    MinVddc;
     uint32_t    MinVddcPhases;
@@ -153,8 +149,7 @@ struct SMU7_Discrete_ACPILevel
 
 typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
 
-struct SMU7_Discrete_Ulv
-{
+struct SMU7_Discrete_Ulv {
     uint32_t    CcPwrDynRm;
     uint32_t    CcPwrDynRm1;
     uint16_t    VddcOffset;
@@ -165,8 +160,7 @@ struct SMU7_Discrete_Ulv
 
 typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
 
-struct SMU7_Discrete_MemoryLevel
-{
+struct SMU7_Discrete_MemoryLevel {
     uint32_t    MinVddc;
     uint32_t    MinVddcPhases;
     uint32_t    MinVddci;
@@ -206,8 +200,7 @@ struct SMU7_Discrete_MemoryLevel
 
 typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
 
-struct SMU7_Discrete_LinkLevel
-{
+struct SMU7_Discrete_LinkLevel {
     uint8_t     PcieGenSpeed;
     uint8_t     PcieLaneCount;
     uint8_t     EnabledForActivity;
@@ -220,8 +213,7 @@ struct SMU7_Discrete_LinkLevel
 typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
 
 
-struct SMU7_Discrete_MCArbDramTimingTableEntry
-{
+struct SMU7_Discrete_MCArbDramTimingTableEntry {
     uint32_t McArbDramTiming;
     uint32_t McArbDramTiming2;
     uint8_t  McArbBurstTime;
@@ -230,15 +222,13 @@ struct SMU7_Discrete_MCArbDramTimingTableEntry
 
 typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
 
-struct SMU7_Discrete_MCArbDramTimingTable
-{
+struct SMU7_Discrete_MCArbDramTimingTable {
     SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
 };
 
 typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
 
-struct SMU7_Discrete_UvdLevel
-{
+struct SMU7_Discrete_UvdLevel {
     uint32_t VclkFrequency;
     uint32_t DclkFrequency;
     uint16_t MinVddc;
@@ -250,8 +240,7 @@ struct SMU7_Discrete_UvdLevel
 
 typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
 
-struct SMU7_Discrete_ExtClkLevel
-{
+struct SMU7_Discrete_ExtClkLevel {
     uint32_t Frequency;
     uint16_t MinVoltage;
     uint8_t  MinPhases;
@@ -260,8 +249,7 @@ struct SMU7_Discrete_ExtClkLevel
 
 typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
 
-struct SMU7_Discrete_StateInfo
-{
+struct SMU7_Discrete_StateInfo {
     uint32_t SclkFrequency;
     uint32_t MclkFrequency;
     uint32_t VclkFrequency;
@@ -285,8 +273,7 @@ struct SMU7_Discrete_StateInfo
 typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
 
 
-struct SMU7_Discrete_DpmTable
-{
+struct SMU7_Discrete_DpmTable {
     SMU7_PIDController                  GraphicsPIDController;
     SMU7_PIDController                  MemoryPIDController;
     SMU7_PIDController                  LinkPIDController;
@@ -406,23 +393,20 @@ typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
 #define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
 #define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
 
-struct SMU7_Discrete_MCRegisterAddress
-{
+struct SMU7_Discrete_MCRegisterAddress {
     uint16_t s0;
     uint16_t s1;
 };
 
 typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
 
-struct SMU7_Discrete_MCRegisterSet
-{
+struct SMU7_Discrete_MCRegisterSet {
     uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
 };
 
 typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
 
-struct SMU7_Discrete_MCRegisters
-{
+struct SMU7_Discrete_MCRegisters {
     uint8_t                             last;
     uint8_t                             reserved[3];
     SMU7_Discrete_MCRegisterAddress     address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
@@ -431,8 +415,7 @@ struct SMU7_Discrete_MCRegisters
 
 typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
 
-struct SMU7_Discrete_FanTable
-{
+struct SMU7_Discrete_FanTable {
        uint16_t FdoMode;
        int16_t  TempMin;
        int16_t  TempMed;
index 78ada9ffd5082d262ecf4bb769acd67650289d98..e130f52fe8d67bd09279c32532d15844ef786b1e 100644 (file)
@@ -36,8 +36,7 @@
 #define SMU7_NUM_NON_TES 2
 
 // All 'soft registers' should be uint32_t.
-struct SMU7_SoftRegisters
-{
+struct SMU7_SoftRegisters {
     uint32_t        RefClockFrequency;
     uint32_t        PmTimerP;
     uint32_t        FeatureEnables;
@@ -80,8 +79,7 @@ struct SMU7_SoftRegisters
 
 typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
 
-struct SMU7_Fusion_GraphicsLevel
-{
+struct SMU7_Fusion_GraphicsLevel {
     uint32_t    MinVddNb;
 
     uint32_t    SclkFrequency;
@@ -111,8 +109,7 @@ struct SMU7_Fusion_GraphicsLevel
 
 typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
 
-struct SMU7_Fusion_GIOLevel
-{
+struct SMU7_Fusion_GIOLevel {
     uint8_t     EnabledForActivity;
     uint8_t     LclkDid;
     uint8_t     Vid;
@@ -137,8 +134,7 @@ struct SMU7_Fusion_GIOLevel
 typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
 
 // UVD VCLK/DCLK state (level) definition.
-struct SMU7_Fusion_UvdLevel
-{
+struct SMU7_Fusion_UvdLevel {
     uint32_t VclkFrequency;
     uint32_t DclkFrequency;
     uint16_t MinVddNb;
@@ -155,8 +151,7 @@ struct SMU7_Fusion_UvdLevel
 typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
 
 // Clocks for other external blocks (VCE, ACP, SAMU).
-struct SMU7_Fusion_ExtClkLevel
-{
+struct SMU7_Fusion_ExtClkLevel {
     uint32_t Frequency;
     uint16_t MinVoltage;
     uint8_t  Divider;
@@ -166,8 +161,7 @@ struct SMU7_Fusion_ExtClkLevel
 };
 typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
 
-struct SMU7_Fusion_ACPILevel
-{
+struct SMU7_Fusion_ACPILevel {
     uint32_t    Flags;
     uint32_t    MinVddNb;
     uint32_t    SclkFrequency;
@@ -181,8 +175,7 @@ struct SMU7_Fusion_ACPILevel
 
 typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
 
-struct SMU7_Fusion_NbDpm
-{
+struct SMU7_Fusion_NbDpm {
     uint8_t DpmXNbPsHi;
     uint8_t DpmXNbPsLo;
     uint8_t Dpm0PgNbPsHi;
@@ -197,8 +190,7 @@ struct SMU7_Fusion_NbDpm
 
 typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
 
-struct SMU7_Fusion_StateInfo
-{
+struct SMU7_Fusion_StateInfo {
     uint32_t SclkFrequency;
     uint32_t LclkFrequency;
     uint32_t VclkFrequency;
@@ -214,8 +206,7 @@ struct SMU7_Fusion_StateInfo
 
 typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
 
-struct SMU7_Fusion_DpmTable
-{
+struct SMU7_Fusion_DpmTable {
     uint32_t                            SystemFlags;
 
     SMU7_PIDController                  GraphicsPIDController;
@@ -230,12 +221,12 @@ struct SMU7_Fusion_DpmTable
     uint8_t                            SamuLevelCount;
     uint16_t                           FpsHighT;
 
-    SMU7_Fusion_GraphicsLevel         GraphicsLevel           [SMU__NUM_SCLK_DPM_STATE];
+    SMU7_Fusion_GraphicsLevel         GraphicsLevel[SMU__NUM_SCLK_DPM_STATE];
     SMU7_Fusion_ACPILevel             ACPILevel;
-    SMU7_Fusion_UvdLevel              UvdLevel                [SMU7_MAX_LEVELS_UVD];
-    SMU7_Fusion_ExtClkLevel           VceLevel                [SMU7_MAX_LEVELS_VCE];
-    SMU7_Fusion_ExtClkLevel           AcpLevel                [SMU7_MAX_LEVELS_ACP];
-    SMU7_Fusion_ExtClkLevel           SamuLevel               [SMU7_MAX_LEVELS_SAMU];
+    SMU7_Fusion_UvdLevel              UvdLevel[SMU7_MAX_LEVELS_UVD];
+    SMU7_Fusion_ExtClkLevel           VceLevel[SMU7_MAX_LEVELS_VCE];
+    SMU7_Fusion_ExtClkLevel           AcpLevel[SMU7_MAX_LEVELS_ACP];
+    SMU7_Fusion_ExtClkLevel           SamuLevel[SMU7_MAX_LEVELS_SAMU];
 
     uint8_t                           UvdBootLevel;
     uint8_t                           VceBootLevel;
@@ -266,10 +257,9 @@ struct SMU7_Fusion_DpmTable
 
 };
 
-struct SMU7_Fusion_GIODpmTable
-{
+struct SMU7_Fusion_GIODpmTable {
 
-    SMU7_Fusion_GIOLevel              GIOLevel                [SMU7_MAX_LEVELS_GIO];
+    SMU7_Fusion_GIOLevel              GIOLevel[SMU7_MAX_LEVELS_GIO];
 
     SMU7_PIDController                GioPIDController;
 
index d49c145db4370f8b189d489e658e25152220c0b3..21d27e6235f396545e66919cd0d0bb11353b0df6 100644 (file)
@@ -33,8 +33,7 @@
 #define SUMO_MINIMUM_ENGINE_CLOCK 800
 #define BOOST_DPM_LEVEL 7
 
-static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
-{
+static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] = {
        SUMO_UTC_DFLT_00,
        SUMO_UTC_DFLT_01,
        SUMO_UTC_DFLT_02,
@@ -52,8 +51,7 @@ static const u32 sumo_utc[SUMO_PM_NUMBER_OF_TC] =
        SUMO_UTC_DFLT_14,
 };
 
-static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] =
-{
+static const u32 sumo_dtc[SUMO_PM_NUMBER_OF_TC] = {
        SUMO_DTC_DFLT_00,
        SUMO_DTC_DFLT_01,
        SUMO_DTC_DFLT_02,
@@ -109,11 +107,11 @@ static void sumo_mg_clockgating_enable(struct radeon_device *rdev, bool enable)
        local1 = RREG32(CG_CGTT_LOCAL_1);
 
        if (enable) {
-               WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
-               WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+               WREG32(CG_CGTT_LOCAL_0, (0 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
+               WREG32(CG_CGTT_LOCAL_1, (0 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
        } else {
-               WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
-               WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+               WREG32(CG_CGTT_LOCAL_0, (0xFFFFFFFF & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
+               WREG32(CG_CGTT_LOCAL_1, (0xFFFFCFFF & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
        }
 }
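
The two branches above are the standard masked read-modify-write: bits covered by the clock-gating mask take the new value, while every other bit of the register is preserved. Distilled into a sketch (the helper is hypothetical; u32 is the kernel type used throughout these files):

/* Hypothetical helper showing the idiom used by
 * sumo_mg_clockgating_enable(): replace the bits selected by 'mask'
 * with 'val' while keeping the register's remaining bits intact. */
static inline u32 masked_update(u32 old, u32 mask, u32 val)
{
	return (val & mask) | (old & ~mask);
}

/* enable  -> masked_update(local0, CGCG_CGTT_LOCAL0_MASK, 0)
 * disable -> masked_update(local0, CGCG_CGTT_LOCAL0_MASK, 0xFFFFFFFF) */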
 
@@ -702,9 +700,9 @@ static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
        u32 nbps1_new = 0;
 
        if (old_ps != NULL)
-               nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
+               nbps1_old = (old_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
 
-       nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)? 1 : 0;
+       nbps1_new = (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE) ? 1 : 0;
 
        if (nbps1_old == 0 && nbps1_new == 1)
                sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
index ef1cc7bad20a76f11537eff4b4483c9ec503624a..b9a2c7ccc881b182974734e6f596dedfe5ec9384 100644 (file)
@@ -39,8 +39,7 @@
 #ifndef TRINITY_MGCG_SEQUENCE
 #define TRINITY_MGCG_SEQUENCE  100
 
-static const u32 trinity_mgcg_shls_default[] =
-{
+static const u32 trinity_mgcg_shls_default[] = {
        /* Register, Value, Mask */
        0x0000802c, 0xc0000000, 0xffffffff,
        0x00003fc4, 0xc0000000, 0xffffffff,
@@ -122,8 +121,7 @@ static const u32 trinity_mgcg_shls_default[] =
 #ifndef TRINITY_SYSLS_SEQUENCE
 #define TRINITY_SYSLS_SEQUENCE  100
 
-static const u32 trinity_sysls_disable[] =
-{
+static const u32 trinity_sysls_disable[] = {
        /* Register, Value, Mask */
        0x0000d0c0, 0x00000000, 0xffffffff,
        0x0000d8c0, 0x00000000, 0xffffffff,
@@ -146,8 +144,7 @@ static const u32 trinity_sysls_disable[] =
        0x00006dfc, 0x0000007f, 0xffffffff
 };
 
-static const u32 trinity_sysls_enable[] =
-{
+static const u32 trinity_sysls_enable[] = {
        /* Register, Value, Mask */
        0x000055e8, 0x00000001, 0xffffffff,
        0x0000d0bc, 0x00000100, 0xffffffff,
@@ -169,8 +166,7 @@ static const u32 trinity_sysls_enable[] =
 };
 #endif
 
-static const u32 trinity_override_mgpg_sequences[] =
-{
+static const u32 trinity_override_mgpg_sequences[] = {
        /* Register, Value */
        0x00000200, 0xE030032C,
        0x00000204, 0x00000FFF,
@@ -366,9 +362,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
                local1 = RREG32_CG(CG_CGTT_LOCAL_1);
 
                WREG32_CG(CG_CGTT_LOCAL_0,
-                         (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+                         (0x00380000 & CGCG_CGTT_LOCAL0_MASK) | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
                WREG32_CG(CG_CGTT_LOCAL_1,
-                         (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+                         (0x0E000000 & CGCG_CGTT_LOCAL1_MASK) | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
 
                WREG32(CGTS_SM_CTRL_REG, CGTS_SM_CTRL_REG_ENABLE);
        } else {
@@ -378,9 +374,9 @@ static void trinity_mg_clockgating_enable(struct radeon_device *rdev,
                local1 = RREG32_CG(CG_CGTT_LOCAL_1);
 
                WREG32_CG(CG_CGTT_LOCAL_0,
-                         CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK) );
+                         CGCG_CGTT_LOCAL0_MASK | (local0 & ~CGCG_CGTT_LOCAL0_MASK));
                WREG32_CG(CG_CGTT_LOCAL_1,
-                         CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK) );
+                         CGCG_CGTT_LOCAL1_MASK | (local1 & ~CGCG_CGTT_LOCAL1_MASK));
        }
 }
 
@@ -1434,7 +1430,7 @@ static void trinity_adjust_uvd_state(struct radeon_device *rdev,
        if (pi->uvd_dpm && r600_is_uvd_state(rps->class, rps->class2)) {
                high_index = trinity_get_uvd_clock_index(rdev, rps);
 
-               switch(high_index) {
+               switch (high_index) {
                case 3:
                case 2:
                        low_index = 1;
index c261657750cacd791876db78b1bea618cced24ac..431e2b68d21e3ee9dd565b85830424869ab08324 100644 (file)
@@ -64,8 +64,7 @@ struct trinity_ps {
 
 #define TRINITY_NUM_NBPSTATES   4
 
-struct trinity_uvd_clock_table_entry
-{
+struct trinity_uvd_clock_table_entry {
        u32 vclk;
        u32 dclk;
        u8 vclk_did;
index 58557c2263a723f0b353d3f20fcbcf1225c500c9..5684639d20a6430527fc6e816cb79847bd61579c 100644 (file)
@@ -142,7 +142,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
        addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
        WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
 
-       WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));
+       WREG32(UVD_FW_START, *((uint32_t *)rdev->uvd.cpu_addr));
 
        return 0;
 }
index ad21c613fec88072ad7345612f86bed2cc7c2eda..96e32dafd4f05cfb1981c56e036c0ba65246d865 100644 (file)
@@ -865,6 +865,8 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
        #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK                 0xa
        /* Subquery id: Query GPU peak pstate memory clock */
        #define AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK                 0xb
+       /* Subquery id: Query GPU input power */
+       #define AMDGPU_INFO_SENSOR_GPU_INPUT_POWER                      0xc
 /* Number of VRAM page faults on CPU access. */
 #define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS   0x1E
 #define AMDGPU_INFO_VRAM_LOST_COUNTER          0x1F
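
For the new subquery, a hedged userspace sketch using libdrm's existing amdgpu_query_sensor_info() entry point (device initialization and error handling trimmed; units follow the existing average-power query):

#include <stdio.h>
#include <stdint.h>
#include <amdgpu.h>      /* libdrm_amdgpu */
#include <amdgpu_drm.h>  /* this UAPI header */

static void print_input_power(amdgpu_device_handle dev)
{
	uint32_t power = 0;

	/* Older kernels without this patch reject the subquery. */
	if (!amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_GPU_INPUT_POWER,
				      sizeof(power), &power))
		printf("GPU input power: %u W\n", power);
}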
index f0ed68974c54390f060328343c0789621835abb3..9ce46edc62a5b1ac5558b3009f7867643d822a2c 100644 (file)
  * - 1.12 - Add DMA buf export ioctl
  * - 1.13 - Add debugger API
  * - 1.14 - Update kfd_event_data
+ * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
  */
 #define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 14
+#define KFD_IOCTL_MINOR_VERSION 15
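
And on the KFD side, a UMD can gate the new behavior on the bumped minor version; a minimal sketch against the existing AMDKFD_IOC_GET_VERSION ioctl (device path and open flags as commonly used by ROCm userspace):

#include <fcntl.h>
#include <stdbool.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

/* Returns true if KFD reports a version that includes the GEM_VA
 * mapping-management support advertised above (1.15 or newer). */
static bool kfd_has_gem_va_dmabuf_fix(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
	bool ok = false;

	if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
		ok = args.major_version == 1 && args.minor_version >= 15;
	if (fd >= 0)
		close(fd);
	return ok;
}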
 
 struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */