Merge tag 'amd-drm-next-5.18-2022-02-11-1' of https://gitlab.freedesktop.org/agd5f...
author: Dave Airlie <airlied@redhat.com>
Mon, 14 Feb 2022 00:31:51 +0000 (10:31 +1000)
committer: Dave Airlie <airlied@redhat.com>
Mon, 14 Feb 2022 00:31:51 +0000 (10:31 +1000)
amd-drm-next-5.18-2022-02-11-1:

amdgpu:
- Cleanup of power management code
- Enable freesync video mode by default
- Cleanup of RAS code
- Improve VRAM access for debug using SDMA (see the sketch after this list)
- Coding style cleanups
- SR-IOV fixes
- More display FP reorg
- TLB flush fixes for Arcturus and Vega20
- Misc display fixes
- Rework special register access methods for SR-IOV
- DP2 fixes
- DP tunneling fixes
- DSC fixes
- More IP discovery cleanups
- Misc RAS fixes
- Enable both SMU i2c buses where applicable
- s2idle improvements
- DPCS header cleanup
- Add new CAP firmware support for SR-IOV
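
For the VRAM debug access item above: amdgpu exposes VRAM contents through a
debugfs file, and this series lets the driver service those accesses over SDMA
where possible. A minimal user-space sketch, assuming debugfs is mounted at
/sys/kernel/debug and the GPU is DRM card 0 (adjust the index for your system;
run as root):

/* Read 256 bytes of VRAM at a byte offset through amdgpu's debugfs
 * file. Path and card index are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* pread() at a VRAM byte offset; the driver performs the access
	 * on our behalf (over SDMA when available after this series). */
	if (pread(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf)) {
		perror("pread");
		close(fd);
		return 1;
	}
	for (unsigned int i = 0; i < sizeof(buf); i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	close(fd);
	return 0;
}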

amdkfd:
- Misc cleanups
- SVM fixes
- CRIU support (see the ioctl sketch after this list)
- Clean up MQD manager
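
On the CRIU item: checkpoint/restore is driven from user space through a new
/dev/kfd ioctl (the CRIU amdgpu plugin linked in the UAPI section below is the
reference consumer). A rough sketch of querying checkpoint sizing for a target
process; the names come from the kfd_ioctl.h additions in this merge, but the
field usage shown here is an assumption, not the canonical CRIU flow:

/* Ask amdkfd how much state a checkpoint of the target pid would carry.
 * Assumption: the target process is already frozen, as CRIU would have
 * done before issuing KFD_CRIU_OP_PROCESS_INFO.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

int main(int argc, char **argv)
{
	struct kfd_ioctl_criu_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0 || argc < 2) {
		fprintf(stderr, "usage (as root): %s <pid>\n", argv[0]);
		return 1;
	}
	args.op = KFD_CRIU_OP_PROCESS_INFO;
	args.pid = (unsigned int)atoi(argv[1]);
	if (ioctl(fd, AMDKFD_IOC_CRIU_OP, &args) < 0) {
		perror("AMDKFD_IOC_CRIU_OP");
		close(fd);
		return 1;
	}
	printf("devices=%u bos=%u objects=%u priv_data_size=%llu\n",
	       args.num_devices, args.num_bos, args.num_objects,
	       (unsigned long long)args.priv_data_size);
	close(fd);
	return 0;
}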

UAPI:
- Add interface to amdgpu CTX ioctl to request a stable power state for profiling (see the sketch after this list)
  https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/207
- Add amdkfd support for CRIU
  https://github.com/checkpoint-restore/criu/pull/1709
- Remove old unused amdkfd debugger interface
  It was only implemented for Kaveri and was only ever used by an old HSA tool that was never open sourced
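
For the stable power state interface: the CTX ioctl gains GET/SET operations
that pin a context's clocks, keeping profiling numbers comparable between
runs. A minimal sketch against the new amdgpu_drm.h definitions; the include
path and the assumption that ctx_id came from a prior AMDGPU_CTX_OP_ALLOC_CTX
on the same fd are illustrative:

/* Pin an existing amdgpu context to the "peak" stable power state for
 * profiling, then drop back to NONE when done. Error handling trimmed.
 */
#include <sys/ioctl.h>
#include <libdrm/amdgpu_drm.h>	/* include path varies by distro */

static int set_stable_pstate(int fd, unsigned int ctx_id, unsigned int pstate)
{
	union drm_amdgpu_ctx args = {0};

	args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
	args.in.ctx_id = ctx_id;
	args.in.flags = pstate;	/* e.g. AMDGPU_CTX_STABLE_PSTATE_PEAK */
	return ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
}

/* Usage around a profiling run:
 *	set_stable_pstate(fd, ctx_id, AMDGPU_CTX_STABLE_PSTATE_PEAK);
 *	... submit and time work on ctx_id ...
 *	set_stable_pstate(fd, ctx_id, AMDGPU_CTX_STABLE_PSTATE_NONE);
 */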

radeon:
- Fix error handling in radeon_driver_open_kms
- UVD suspend fix
- Misc fixes

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220211220706.5803-1-alexander.deucher@amd.com
394 files changed:
drivers/gpu/drm/amd/amdgpu/aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
drivers/gpu/drm/amd/amdkfd/Makefile
drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c [deleted file]
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h [deleted file]
drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c [deleted file]
drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h [deleted file]
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile [deleted file]
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c [deleted file]
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/Makefile
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dm_helpers.h
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c [moved from drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h [moved from drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c [moved from drivers/gpu/drm/amd/display/dc/calcs/custom_float.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c [moved from drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c with 99% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c [moved from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h [moved from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c [moved from drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c [moved from drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c with 100% similarity]
drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h
drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
drivers/gpu/drm/amd/display/dc/inc/resource.h
drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
drivers/gpu/drm/amd/display/dc/link/Makefile [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/virtual/Makefile
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/display/include/bios_parser_types.h
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/display/include/ddc_service_types.h
drivers/gpu/drm/amd/display/include/dpcd_defs.h
drivers/gpu/drm/amd/display/include/grph_object_defs.h
drivers/gpu/drm/amd/display/include/grph_object_id.h
drivers/gpu/drm/amd/display/include/link_service_types.h
drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h [moved from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h with 99% similarity]
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h [moved from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h with 99% similarity]
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_offset.h [moved from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_3_offset.h with 100% similarity]
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h [moved from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_3_sh_mask.h with 100% similarity]
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/Makefile
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c [new file with mode: 0644]
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h [new file with mode: 0644]
drivers/gpu/drm/amd/pm/legacy-dpm/Makefile [new file with mode: 0644]
drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h [moved from drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h with 100% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c [moved from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c with 99% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h [moved from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h with 100% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c [moved from drivers/gpu/drm/amd/pm/powerplay/kv_smc.c with 100% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c [new file with mode: 0644]
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h [new file with mode: 0644]
drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h [moved from drivers/gpu/drm/amd/pm/powerplay/ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h [moved from drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h with 100% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c [moved from drivers/gpu/drm/amd/pm/powerplay/si_dpm.c with 98% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h [moved from drivers/gpu/drm/amd/pm/powerplay/si_dpm.h with 99% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c [moved from drivers/gpu/drm/amd/pm/powerplay/si_smc.c with 100% similarity]
drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h [moved from drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/Makefile
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h [moved from drivers/gpu/drm/amd/pm/inc/amd_powerplay.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h [moved from drivers/gpu/drm/amd/pm/inc/hardwaremanager.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h [moved from drivers/gpu/drm/amd/pm/inc/hwmgr.h with 99% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h [moved from drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h [moved from drivers/gpu/drm/amd/pm/inc/power_state.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h [moved from drivers/gpu/drm/amd/pm/inc/pp_debug.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h [moved from drivers/gpu/drm/amd/pm/inc/pp_endian.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h [moved from drivers/gpu/drm/amd/pm/inc/pp_thermal.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h [moved from drivers/gpu/drm/amd/pm/inc/ppinterrupt.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h [moved from drivers/gpu/drm/amd/pm/inc/smu10.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h [moved from drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h [moved from drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h [moved from drivers/gpu/drm/amd/pm/inc/smu7.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h [moved from drivers/gpu/drm/amd/pm/inc/smu71.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h [moved from drivers/gpu/drm/amd/pm/inc/smu71_discrete.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h [moved from drivers/gpu/drm/amd/pm/inc/smu72.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h [moved from drivers/gpu/drm/amd/pm/inc/smu72_discrete.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h [moved from drivers/gpu/drm/amd/pm/inc/smu73.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h [moved from drivers/gpu/drm/amd/pm/inc/smu73_discrete.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h [moved from drivers/gpu/drm/amd/pm/inc/smu74.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h [moved from drivers/gpu/drm/amd/pm/inc/smu74_discrete.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h [moved from drivers/gpu/drm/amd/pm/inc/smu75.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h [moved from drivers/gpu/drm/amd/pm/inc/smu75_discrete.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h [moved from drivers/gpu/drm/amd/pm/inc/smu7_common.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h [moved from drivers/gpu/drm/amd/pm/inc/smu7_discrete.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h [moved from drivers/gpu/drm/amd/pm/inc/smu7_fusion.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h [moved from drivers/gpu/drm/amd/pm/inc/smu8.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h [moved from drivers/gpu/drm/amd/pm/inc/smu8_fusion.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h [moved from drivers/gpu/drm/amd/pm/inc/smu9.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h [moved from drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h [moved from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h with 99% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h [moved from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h [moved from drivers/gpu/drm/amd/pm/inc/smumgr.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h [moved from drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h [moved from drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h with 97% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h [moved from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h [moved from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h [moved from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h [moved from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h with 99% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h [moved from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h [moved from drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h [moved from drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h [moved from drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h [moved from drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h [moved from drivers/gpu/drm/amd/pm/inc/smu_types.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_0.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v12_0.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v13_0.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h [moved from drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h with 100% similarity]
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/kfd_ioctl.h

diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index bcfdb63b1d4215e98d037dafb3edddfc01277cdf..a545df4efce10371ff9219b06e230860f6825c85 100644
@@ -260,7 +260,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
        adev->gfx.rlc.funcs->resume(adev);
 
        /* Wait for FW reset event complete */
-       r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+       r = amdgpu_dpm_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
        if (r) {
                dev_err(adev->dev,
                        "Failed to get response from firmware after reset\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d8b854fcbffa7dc8a4925bf90eabab4475f04696..2931c8ff4cc63842b73f41b7f88460570d153b0e 100644
@@ -99,7 +99,6 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_doorbell.h"
 #include "amdgpu_amdkfd.h"
-#include "amdgpu_smu.h"
 #include "amdgpu_discovery.h"
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
 #include "amdgpu_smuio.h"
 #include "amdgpu_fdinfo.h"
 #include "amdgpu_mca.h"
+#include "amdgpu_ras.h"
 
 #define MAX_GPU_INSTANCE               16
 
@@ -197,7 +197,6 @@ extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
 extern int amdgpu_smu_pptable_id;
 extern uint amdgpu_dc_feature_mask;
-extern uint amdgpu_freesync_vid_mode;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dm_abm_level;
 extern int amdgpu_backlight;
@@ -373,7 +372,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
  */
 bool amdgpu_get_bios(struct amdgpu_device *adev);
 bool amdgpu_read_bios(struct amdgpu_device *adev);
-
+bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+                                    u8 *bios, u32 length_bytes);
 /*
  * Clocks
  */
@@ -950,12 +950,6 @@ struct amdgpu_device {
 
        /* powerplay */
        struct amd_powerplay            powerplay;
-       bool                            pp_force_state_enabled;
-
-       /* smu */
-       struct smu_context              smu;
-
-       /* dpm */
        struct amdgpu_pm                pm;
        u32                             cg_flags;
        u32                             pg_flags;
@@ -1100,6 +1094,8 @@ struct amdgpu_device {
        uint32_t                        ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
 
        bool                            ram_is_direct_mapped;
+
+       struct list_head                ras_list;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1321,6 +1317,10 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring);
 
 void amdgpu_device_halt(struct amdgpu_device *adev);
+u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+                               u32 reg);
+void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+                               u32 reg, u32 v);
 
 /* atpx handler */
 #if defined(CONFIG_VGA_SWITCHEROO)
@@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 
 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
 static inline void amdgpu_acpi_detect(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
                                                 enum amdgpu_ss ss_state) { return 0; }
 #endif
 
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
+
 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **mapping);
@@ -1452,6 +1458,15 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
                               enum amd_powergating_state state);
 
+static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
+{
+       return amdgpu_gpu_recovery != 0 &&
+               adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
+               adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
+               adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
+               adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
+}
+
 #include "amdgpu_object.h"
 
 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4811b0faafd9ad62e24edaf3d281182890440103..0e12315fa0cb87c643426bda0fa3a8f09feb3820 100644
@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
        }
 }
 
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+       return !(adev->flags & AMD_IS_APU) ||
+               (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
 /**
  * amdgpu_acpi_is_s0ix_active
  *
@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
-       if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
-               if (adev->flags & AMD_IS_APU)
-                       return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+       if (!(adev->flags & AMD_IS_APU) ||
+           (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+               return false;
+
+       if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+               dev_warn_once(adev->dev,
+                             "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+                             "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+               return false;
        }
-#endif
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+       dev_warn_once(adev->dev,
+                     "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
        return false;
+#else
+       return true;
+#endif /* CONFIG_AMD_PMC */
 }
+
+#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ac841ae8f5cc5a4f7e656761f99e4dabb451781d..4cb14c2fe53fbe3fda50094f1c668279c232ec72 100644
@@ -131,6 +131,7 @@ struct amdkfd_process_info {
        atomic_t evicted_bos;
        struct delayed_work restore_userptr_work;
        struct pid *pid;
+       bool block_mmu_notifications;
 };
 
 int amdgpu_amdkfd_init(void);
@@ -268,7 +269,7 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct amdgpu_device *adev, uint64_t va, uint64_t size,
                void *drm_priv, struct kgd_mem **mem,
-               uint64_t *offset, uint32_t flags);
+               uint64_t *offset, uint32_t flags, bool criu_resume);
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
                uint64_t *size);
@@ -297,6 +298,10 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
                                struct tile_config *config);
 void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
                                bool reset);
+bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
+void amdgpu_amdkfd_block_mmu_notifications(void *p);
+int amdgpu_amdkfd_criu_resume(void *p);
+
 #if IS_ENABLED(CONFIG_HSA_AMD)
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
index 46cd4ee6bafb7b7385f53bbde006e0300f85f7d5..c8935d71820737538f7fab1f2b97dd14241078fe 100644
@@ -37,10 +37,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
        .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_gfx_v9_hqd_destroy,
        .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
-       .address_watch_disable = kgd_gfx_v9_address_watch_disable,
-       .address_watch_execute = kgd_gfx_v9_address_watch_execute,
        .wave_control_execute = kgd_gfx_v9_wave_control_execute,
-       .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
                                kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index abe93b3ff765cb024449b008a49f58b110abd7fe..4191af5a3f132282dd01eb1ef1b7afdf967562f1 100644
@@ -289,10 +289,7 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
        .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_gfx_v9_hqd_destroy,
        .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
-       .address_watch_disable = kgd_gfx_v9_address_watch_disable,
-       .address_watch_execute = kgd_gfx_v9_address_watch_execute,
        .wave_control_execute = kgd_gfx_v9_wave_control_execute,
-       .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
                                kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 7b7f4b2764c1aeb69f349156a35e55f4f5723ac9..9378fc79e9ea61ef9ad694298bb448bdd5838355 100644
@@ -671,20 +671,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int kgd_address_watch_disable(struct amdgpu_device *adev)
-{
-       return 0;
-}
-
-static int kgd_address_watch_execute(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo)
-{
-       return 0;
-}
-
 static int kgd_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
@@ -709,13 +695,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev,
        return 0;
 }
 
-static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset)
-{
-       return 0;
-}
-
 static void set_vm_context_page_table_base(struct amdgpu_device *adev,
                uint32_t vmid, uint64_t page_table_base)
 {
@@ -767,10 +746,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-       .address_watch_disable = kgd_address_watch_disable,
-       .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
-       .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
                        get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 1f37d35740015923f85e69dee9ceaeb154cd5762..e9c80ce13f3edb9a32279467cc76ee7eb69dd83a 100644
@@ -582,21 +582,6 @@ static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
        return 0;
 }
 
-
-static int address_watch_disable_v10_3(struct amdgpu_device *adev)
-{
-       return 0;
-}
-
-static int address_watch_execute_v10_3(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo)
-{
-       return 0;
-}
-
 static int wave_control_execute_v10_3(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
@@ -621,13 +606,6 @@ static int wave_control_execute_v10_3(struct amdgpu_device *adev,
        return 0;
 }
 
-static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset)
-{
-       return 0;
-}
-
 static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
                uint32_t vmid, uint64_t page_table_base)
 {
@@ -809,10 +787,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
        .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
        .hqd_destroy = hqd_destroy_v10_3,
        .hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
-       .address_watch_disable = address_watch_disable_v10_3,
-       .address_watch_execute = address_watch_execute_v10_3,
        .wave_control_execute = wave_control_execute_v10_3,
-       .address_watch_get_offset = address_watch_get_offset_v10_3,
        .get_atc_vmid_pasid_mapping_info = NULL,
        .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
        .program_trap_handler_settings = program_trap_handler_settings_v10_3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 36528dad7684a47b5120ea84619cf5694b0c3ea4..65552bb7d2f274d880e85fa7b6e5e5abcd77ede0 100644
@@ -45,43 +45,6 @@ enum {
        MAX_WATCH_ADDRESSES = 4
 };
 
-enum {
-       ADDRESS_WATCH_REG_ADDR_HI = 0,
-       ADDRESS_WATCH_REG_ADDR_LO,
-       ADDRESS_WATCH_REG_CNTL,
-       ADDRESS_WATCH_REG_MAX
-};
-
-/*  not defined in the CI/KV reg file  */
-enum {
-       ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
-       ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
-       ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
-       /* extend the mask to 26 bits to match the low address field */
-       ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
-       ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
-};
-
-static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
-       mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
-       mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
-       mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
-       mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
-};
-
-union TCP_WATCH_CNTL_BITS {
-       struct {
-               uint32_t mask:24;
-               uint32_t vmid:4;
-               uint32_t atc:1;
-               uint32_t mode:2;
-               uint32_t valid:1;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
 static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
 {
@@ -529,55 +492,6 @@ static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
        return 0;
 }
 
-static int kgd_address_watch_disable(struct amdgpu_device *adev)
-{
-       union TCP_WATCH_CNTL_BITS cntl;
-       unsigned int i;
-
-       cntl.u32All = 0;
-
-       cntl.bitfields.valid = 0;
-       cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
-       cntl.bitfields.atc = 1;
-
-       /* Turning off this address until we set all the registers */
-       for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
-               WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
-                       ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
-       return 0;
-}
-
-static int kgd_address_watch_execute(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo)
-{
-       union TCP_WATCH_CNTL_BITS cntl;
-
-       cntl.u32All = cntl_val;
-
-       /* Turning off this watch point until we set all the registers */
-       cntl.bitfields.valid = 0;
-       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
-               ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
-       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
-               ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
-
-       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
-               ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
-
-       /* Enable the watch point */
-       cntl.bitfields.valid = 1;
-
-       WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
-               ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
-       return 0;
-}
-
 static int kgd_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
@@ -602,13 +516,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev,
        return 0;
 }
 
-static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset)
-{
-       return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
-}
-
 static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
 {
@@ -665,10 +572,7 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-       .address_watch_disable = kgd_address_watch_disable,
-       .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
-       .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
        .set_scratch_backing_va = set_scratch_backing_va,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 52832cd69a93311e8ed61697602470a944d28c14..9dc5f2a0cc07acaf648a936203ca92edc293529e 100644
@@ -538,20 +538,6 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int kgd_address_watch_disable(struct amdgpu_device *adev)
-{
-       return 0;
-}
-
-static int kgd_address_watch_execute(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo)
-{
-       return 0;
-}
-
 static int kgd_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
@@ -576,13 +562,6 @@ static int kgd_wave_control_execute(struct amdgpu_device *adev,
        return 0;
 }
 
-static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset)
-{
-       return 0;
-}
-
 static void set_scratch_backing_va(struct amdgpu_device *adev,
                                        uint64_t va, uint32_t vmid)
 {
@@ -614,10 +593,7 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-       .address_watch_disable = kgd_address_watch_disable,
-       .address_watch_execute = kgd_address_watch_execute,
        .wave_control_execute = kgd_wave_control_execute,
-       .address_watch_get_offset = kgd_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
                        get_atc_vmid_pasid_mapping_info,
        .set_scratch_backing_va = set_scratch_backing_va,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1abf662a0e9148c5f6289ea11e2163509f51337a..53895a41932e818d55acb0e436c201ba6e4e1031 100644
@@ -622,20 +622,6 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev)
-{
-       return 0;
-}
-
-int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo)
-{
-       return 0;
-}
-
 int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd)
@@ -660,13 +646,6 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
        return 0;
 }
 
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset)
-{
-       return 0;
-}
-
 void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
                        uint32_t vmid, uint64_t page_table_base)
 {
@@ -888,10 +867,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_gfx_v9_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
-       .address_watch_disable = kgd_gfx_v9_address_watch_disable,
-       .address_watch_execute = kgd_gfx_v9_address_watch_execute,
        .wave_control_execute = kgd_gfx_v9_wave_control_execute,
-       .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
        .get_atc_vmid_pasid_mapping_info =
                        kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 24be49df26fdd88e7a7197771f7de22620296b10..c7ed3bc9053c58fc637ae39cac7a32008dedb388 100644
@@ -46,19 +46,9 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
                                enum kfd_preempt_type reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id);
-int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev);
-int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo);
 int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd);
-uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset);
-
 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f9bab963a948aed0ed7a69dedeb02364574f130b..2e00c3fb4bd3579cbf0448ccff2ced6d77e67571 100644
@@ -778,7 +778,7 @@ unwind:
                        continue;
                if (attachment[i]->bo_va) {
                        amdgpu_bo_reserve(bo[i], true);
-                       amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
+                       amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
                        amdgpu_bo_unreserve(bo[i]);
                        list_del(&attachment[i]->list);
                }
@@ -795,7 +795,7 @@ static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 
        pr_debug("\t remove VA 0x%llx in entry %p\n",
                        attachment->va, attachment);
-       amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+       amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
        drm_gem_object_put(&bo->tbo.base);
        list_del(&attachment->list);
        kfree(attachment);
@@ -842,7 +842,8 @@ static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
  *
  * Returns 0 for success, negative errno for errors.
  */
-static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
+static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
+                          bool criu_resume)
 {
        struct amdkfd_process_info *process_info = mem->process_info;
        struct amdgpu_bo *bo = mem->bo;
@@ -864,6 +865,18 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
                goto out;
        }
 
+       if (criu_resume) {
+               /*
+                * During a CRIU restore operation, the userptr buffer objects
+                * will be validated in the restore_userptr_work worker at a
+                * later stage when it is scheduled by another ioctl called by
+                * CRIU master process for the target pid for restore.
+                */
+               atomic_inc(&mem->invalid);
+               mutex_unlock(&process_info->lock);
+               return 0;
+       }
+
        ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
        if (ret) {
                pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
@@ -1452,10 +1465,39 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
        return avm->pd_phys_addr;
 }
 
+void amdgpu_amdkfd_block_mmu_notifications(void *p)
+{
+       struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
+
+       mutex_lock(&pinfo->lock);
+       WRITE_ONCE(pinfo->block_mmu_notifications, true);
+       mutex_unlock(&pinfo->lock);
+}
+
+int amdgpu_amdkfd_criu_resume(void *p)
+{
+       int ret = 0;
+       struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
+
+       mutex_lock(&pinfo->lock);
+       pr_debug("scheduling work\n");
+       atomic_inc(&pinfo->evicted_bos);
+       if (!READ_ONCE(pinfo->block_mmu_notifications)) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+       WRITE_ONCE(pinfo->block_mmu_notifications, false);
+       schedule_delayed_work(&pinfo->restore_userptr_work, 0);
+
+out_unlock:
+       mutex_unlock(&pinfo->lock);
+       return ret;
+}
+
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct amdgpu_device *adev, uint64_t va, uint64_t size,
                void *drm_priv, struct kgd_mem **mem,
-               uint64_t *offset, uint32_t flags)
+               uint64_t *offset, uint32_t flags, bool criu_resume)
 {
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
        enum ttm_bo_type bo_type = ttm_bo_type_device;
@@ -1558,7 +1600,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
 
        if (user_addr) {
-               ret = init_user_pages(*mem, user_addr);
+               pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+               ret = init_user_pages(*mem, user_addr, criu_resume);
                if (ret)
                        goto allocate_init_user_pages_failed;
        } else  if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
@@ -1813,12 +1856,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                                true);
        ret = unreserve_bo_and_vms(&ctx, false, false);
 
-       /* Only apply no TLB flush on Aldebaran to
-        * workaround regressions on other Asics.
-        */
-       if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
-               *table_freed = true;
-
        goto out;
 
 out_unreserve:
@@ -2068,6 +2105,10 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
        int evicted_bos;
        int r = 0;
 
+       /* Do not process MMU notifications until stage-4 IOCTL is received */
+       if (READ_ONCE(process_info->block_mmu_notifications))
+               return 0;
+
        atomic_inc(&mem->invalid);
        evicted_bos = atomic_inc_return(&process_info->evicted_bos);
        if (evicted_bos == 1) {
@@ -2635,3 +2676,14 @@ int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
 
        return 0;
 }
+
+bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
+{
+       struct kfd_mem_attachment *entry;
+
+       list_for_each_entry(entry, &mem->attachments, list) {
+               if (entry->is_mapped && entry->adev == adev)
+                       return true;
+       }
+       return false;
+}
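
A hedged sketch of the intended checkpoint-side use of this query; bo_bucket and the surrounding context are hypothetical stand-ins for the CRIU dump code:

/* Record whether this BO is currently mapped on the device being
 * checkpointed, so the mapping can be re-established on restore. */
if (amdgpu_amdkfd_bo_mapped_to_dev(adev, mem))
        bo_bucket->is_mapped = true;    /* bo_bucket: hypothetical record */
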
index 12a6b1c99c93e9d25f3146456af1f24e5807e50c..9ba4817a91484eadf36159cb6102010a8afd17b4 100644 (file)
@@ -1083,6 +1083,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
        return 0;
 }
 
+#ifdef CONFIG_DRM_AMDGPU_SI
 int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
                                            u32 clock,
                                            bool strobe_mode,
@@ -1503,6 +1504,7 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
        }
        return -EINVAL;
 }
+#endif
 
 bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
 {
index 27e74b1fc260a3b27e97e1fb576bad29457908c3..4153d520e2a369b796c660768ab6f72a0901b9c4 100644 (file)
@@ -160,6 +160,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                                       bool strobe_mode,
                                       struct atom_clock_dividers *dividers);
 
+#ifdef CONFIG_DRM_AMDGPU_SI
 int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
                                            u32 clock,
                                            bool strobe_mode,
@@ -179,6 +180,17 @@ int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
 int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
                                      u8 module_index,
                                      struct atom_mc_reg_table *reg_table);
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+                            u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+                                                     u16 *voltage,
+                                                     u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+                                         u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+                             u8 voltage_type,
+                             u8 *svd_gpio_id, u8 *svc_gpio_id);
+#endif
 
 bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
 
@@ -190,21 +202,11 @@ void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev
 bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
 
 void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
-                            u16 voltage_id, u16 *voltage);
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
-                                                     u16 *voltage,
-                                                     u16 leakage_idx);
-void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
-                                         u16 *vddc, u16 *vddci, u16 *mvdd);
 int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
                                       u8 clock_type,
                                       u32 clock,
                                       bool strobe_mode,
                                       struct atom_clock_dividers *dividers);
-int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
-                             u8 voltage_type,
-                             u8 *svd_gpio_id, u8 *svc_gpio_id);
 
 int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
                                   uint32_t table,
index 27b19503773b93a4cc7a2019d9eff80150212ea5..0eddca795e966d474e335e5041a90ba7461d7aa7 100644 (file)
@@ -464,3 +464,41 @@ success:
        adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? true : false;
        return true;
 }
+
+/* helper function for soc15 and onwards to read bios from rom */
+bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+                                    u8 *bios, u32 length_bytes)
+{
+       u32 *dw_ptr;
+       u32 i, length_dw;
+       u32 rom_index_offset;
+       u32 rom_data_offset;
+
+       if (bios == NULL)
+               return false;
+       if (length_bytes == 0)
+               return false;
+       /* APU vbios image is part of sbios image */
+       if (adev->flags & AMD_IS_APU)
+               return false;
+       if (!adev->smuio.funcs ||
+           !adev->smuio.funcs->get_rom_index_offset ||
+           !adev->smuio.funcs->get_rom_data_offset)
+               return false;
+
+       dw_ptr = (u32 *)bios;
+       length_dw = ALIGN(length_bytes, 4) / 4;
+
+       rom_index_offset =
+               adev->smuio.funcs->get_rom_index_offset(adev);
+       rom_data_offset =
+               adev->smuio.funcs->get_rom_data_offset(adev);
+
+       /* set rom index to 0 */
+       WREG32(rom_index_offset, 0);
+       /* read out the rom data */
+       for (i = 0; i < length_dw; i++)
+               dw_ptr[i] = RREG32(rom_data_offset);
+
+       return true;
+}
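
A sketch of a plausible caller, assuming a fixed 256 KiB image size; a real caller would take the size from the ROM header. Note the helper copies whole dwords (ALIGN(length_bytes, 4) / 4), so the buffer should be sized to a multiple of 4 bytes:

const u32 len = 256 * 1024;     /* assumed image size */
u8 *bios = kzalloc(len, GFP_KERNEL);

if (bios && amdgpu_soc15_read_bios_from_rom(adev, bios, len))
        adev->bios = bios;      /* buffer now holds the ROM contents */
else
        kfree(bios);
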
index e8440d306496779ddab124c5393f252301ab1008..10b9e99c8941206b066b1aac7cd47f7e686839d5 100644 (file)
@@ -127,8 +127,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                goto free_chunk;
        }
 
-       mutex_lock(&p->ctx->lock);
-
        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
@@ -585,6 +583,16 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                }
        }
 
+       /* Move the fence wait to after the reservation lock of the
+        * PD root is taken, so the ctx mutex lock is no longer needed.
+        */
+       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entity);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+               goto error_validate;
+       }
+
        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
@@ -700,7 +708,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
        dma_fence_put(parser->fence);
 
        if (parser->ctx) {
-               mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
@@ -944,7 +951,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        if (parser->job->uf_addr && ring->funcs->no_user_fence)
                return -EINVAL;
 
-       return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
+       return 0;
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@@ -1360,7 +1367,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                goto out;
 
        r = amdgpu_cs_submit(&parser, cs);
-
 out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 
@@ -1506,6 +1512,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                return 0;
 
        default:
+               dma_fence_put(fence);
                return -EINVAL;
        }
 }
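
The added dma_fence_put() closes a reference leak: the default branch used to return without dropping the reference obtained at the top of the ioctl. The balancing rule, sketched with the names visible in this hunk:

fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);  /* takes a ref */
if (IS_ERR(fence))
        return PTR_ERR(fence);

switch (info->in.what) {
/* ... each valid case either hands the reference to a consumer
 *     (e.g. a syncobj) or puts it before returning ... */
default:
        dma_fence_put(fence);   /* no consumer took ownership */
        return -EINVAL;
}
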
index da21e60bb8272600025d16e821b2d3f8ce1d1406..c6d4d41c4393e18f5b3108d5cd90b6e084ef3ab1 100644 (file)
@@ -98,7 +98,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-               amdgpu_vm_bo_rmv(adev, *bo_va);
+               amdgpu_vm_bo_del(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }
index 468003583b2a3ad76be7538c4624ad9802389812..1c72f6095f087c13c6ee6a9bad803d0617ac9760 100644 (file)
@@ -230,13 +230,13 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
-       mutex_init(&ctx->lock);
 
        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
        ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        ctx->init_priority = priority;
        ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
+       ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
 
        return 0;
 }
@@ -255,6 +255,86 @@ static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
        kfree(entity);
 }
 
+static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
+                                       u32 *stable_pstate)
+{
+       struct amdgpu_device *adev;
+       enum amd_dpm_forced_level current_level;
+
+       if (!ctx)
+               return -EINVAL;
+       adev = ctx->adev;
+
+       current_level = amdgpu_dpm_get_performance_level(adev);
+
+       switch (current_level) {
+       case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+               *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
+               break;
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+               *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
+               break;
+       case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+               *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
+               break;
+       case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
+               break;
+       default:
+               *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
+               break;
+       }
+       return 0;
+}
+
+static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
+                                       u32 stable_pstate)
+{
+       struct amdgpu_device *adev;
+       enum amd_dpm_forced_level level;
+       int r;
+
+       if (!ctx)
+               return -EINVAL;
+       adev = ctx->adev;
+
+       mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+       if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
+               r = -EBUSY;
+               goto done;
+       }
+
+       switch (stable_pstate) {
+       case AMDGPU_CTX_STABLE_PSTATE_NONE:
+               level = AMD_DPM_FORCED_LEVEL_AUTO;
+               break;
+       case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
+               level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
+               break;
+       case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
+               level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
+               break;
+       case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
+               level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
+               break;
+       case AMDGPU_CTX_STABLE_PSTATE_PEAK:
+               level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+               break;
+       default:
+               r = -EINVAL;
+               goto done;
+       }
+
+       r = amdgpu_dpm_force_performance_level(adev, level);
+
+       if (level == AMD_DPM_FORCED_LEVEL_AUTO)
+               adev->pm.stable_pstate_ctx = NULL;
+       else
+               adev->pm.stable_pstate_ctx = ctx;
+done:
+       mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
+
+       return r;
+}
+
 static void amdgpu_ctx_fini(struct kref *ref)
 {
        struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
@@ -270,8 +350,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
                        ctx->entities[i][j] = NULL;
                }
        }
-
-       mutex_destroy(&ctx->lock);
+       amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
        kfree(ctx);
 }
 
@@ -467,11 +546,41 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        return 0;
 }
 
+
+static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
+                                   struct amdgpu_fpriv *fpriv, uint32_t id,
+                                   bool set, u32 *stable_pstate)
+{
+       struct amdgpu_ctx *ctx;
+       struct amdgpu_ctx_mgr *mgr;
+       int r;
+
+       if (!fpriv)
+               return -EINVAL;
+
+       mgr = &fpriv->ctx_mgr;
+       mutex_lock(&mgr->lock);
+       ctx = idr_find(&mgr->ctx_handles, id);
+       if (!ctx) {
+               mutex_unlock(&mgr->lock);
+               return -EINVAL;
+       }
+
+       if (set)
+               r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
+       else
+               r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);
+
+       mutex_unlock(&mgr->lock);
+       return r;
+}
+
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp)
 {
        int r;
-       uint32_t id;
+       uint32_t id, stable_pstate;
        int32_t priority;
 
        union drm_amdgpu_ctx *args = data;
@@ -500,6 +609,20 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
        case AMDGPU_CTX_OP_QUERY_STATE2:
                r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
                break;
+       case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
+               if (args->in.flags)
+                       return -EINVAL;
+               r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
+               if (!r)
+                       args->out.pstate.flags = stable_pstate;
+               break;
+       case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
+               if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
+                       return -EINVAL;
+               stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
+               if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
+                       return -EINVAL;
+               r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
+               break;
        default:
                return -EINVAL;
        }
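
A hedged userspace sketch of the new interface (the UAPI side of the merge request referenced in the tag text); the struct and flag names are taken from this hunk, while the libdrm call and header paths are assumptions:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Pin a context to a stable profiling pstate, e.g. PEAK; pass
 * AMDGPU_CTX_STABLE_PSTATE_NONE afterwards to return to auto DPM. */
static int set_stable_pstate(int fd, uint32_t ctx_id, uint32_t pstate)
{
        union drm_amdgpu_ctx args;

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
        args.in.ctx_id = ctx_id;
        args.in.flags = pstate;

        return drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}

Per amdgpu_ctx_set_stable_pstate() above, only one context may hold a non-auto pstate at a time (a second caller gets -EBUSY), and the pstate is dropped back to auto when the owning context is destroyed.
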
index a44b8b8ed39c2249146608817a5e468c07ab3f9a..d0cbfcea90f72abed197e97575d849eb727d2249 100644 (file)
@@ -49,10 +49,10 @@ struct amdgpu_ctx {
        bool                            preamble_presented;
        int32_t                         init_priority;
        int32_t                         override_priority;
-       struct mutex                    lock;
        atomic_t                        guilty;
        unsigned long                   ras_counter_ce;
        unsigned long                   ras_counter_ue;
+       uint32_t                        stable_pstate;
 };
 
 struct amdgpu_ctx_mgr {
index 25e2e5bf90eb20c9d85c3e021f09026020d9929c..4b950de9bf663ad7ece3139ce8467d4b0032b2b6 100644 (file)
@@ -1120,8 +1120,10 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
                return -EINVAL;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-       if (r < 0)
+       if (r < 0) {
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
+       }
 
        while (size) {
                uint32_t value;
@@ -1585,22 +1587,25 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
-               if (ret || val > max_freq || val < min_freq)
-                       return -EINVAL;
-               ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
-       } else {
-               return 0;
+       ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
+       if (ret == -EOPNOTSUPP) {
+               ret = 0;
+               goto out;
        }
+       if (ret || val > max_freq || val < min_freq) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
+       if (ret)
+               ret = -EINVAL;
 
+out:
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
-       if (ret)
-               return -EINVAL;
-
-       return 0;
+       return ret;
 }
 
 DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
index ed077de426d9b9e042217e9a0ce1c4b513e97701..0b98d65056e30425ce001c0fd14074e68774b9c3 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/iommu.h>
+#include <linux/pci.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
@@ -55,7 +56,6 @@
 #include "soc15.h"
 #include "nv.h"
 #include "bif/bif_4_1_d.h"
-#include <linux/pci.h>
 #include <linux/firmware.h>
 #include "amdgpu_vf_error.h"
 
@@ -566,7 +566,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-                       return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
+                       return amdgpu_sriov_wreg(adev, reg, v, 0, 0);
        } else if ((reg * 4) >= adev->rmmio_size) {
                adev->pcie_wreg(adev, reg * 4, v);
        } else {
@@ -2073,6 +2073,8 @@ out:
  */
 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 {
+       struct drm_device *dev = adev_to_drm(adev);
+       struct pci_dev *parent;
        int i, r;
 
        amdgpu_device_enable_virtual_display(adev);
@@ -2137,6 +2139,16 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                break;
        }
 
+       if (amdgpu_has_atpx() &&
+           (amdgpu_is_atpx_hybrid() ||
+            amdgpu_has_atpx_dgpu_power_cntl()) &&
+           ((adev->flags & AMD_IS_APU) == 0) &&
+           !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
+               adev->flags |= AMD_IS_PX;
+
+       parent = pci_upstream_bridge(adev->pdev);
+       adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+
        amdgpu_amdkfd_device_probe(adev);
 
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -2624,7 +2636,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
        /* For passthrough configuration on arcturus and aldebaran, enable special SBR handling */
        if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
                               adev->asic_type == CHIP_ALDEBARAN ))
-               smu_handle_passthrough_sbr(&adev->smu, true);
+               amdgpu_dpm_handle_passthrough_sbr(adev, true);
 
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                mutex_lock(&mgpu_info.mutex);
@@ -2708,11 +2720,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
                }
        }
 
-       amdgpu_amdkfd_suspend(adev, false);
-
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
+       amdgpu_amdkfd_suspend(adev, false);
+
        /* Workaround for ASICs that need to disable SMC first */
        amdgpu_device_smu_fini_early(adev);
 
@@ -2881,7 +2893,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
        int i, r;
 
        if (adev->in_s0ix)
-               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+               amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
@@ -3307,9 +3319,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
                if (adev->asic_reset_res)
                        goto fail;
 
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->reset_ras_error_count)
-                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
+               if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
+                   adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
+                       adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
        } else {
 
                task_barrier_full(&hive->tb);
@@ -3497,8 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        init_rwsem(&adev->reset_sem);
        mutex_init(&adev->psp.mutex);
        mutex_init(&adev->notifier_lock);
+       mutex_init(&adev->pm.stable_pstate_ctx_lock);
 
-        amdgpu_device_init_apu_flags(adev);
+       amdgpu_device_init_apu_flags(adev);
 
        r = amdgpu_device_check_arguments(adev);
        if (r)
@@ -3519,6 +3532,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
        INIT_LIST_HEAD(&adev->reset_list);
 
+       INIT_LIST_HEAD(&adev->ras_list);
+
        INIT_DELAYED_WORK(&adev->delayed_init_work,
                          amdgpu_device_delayed_init_work_handler);
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -4044,7 +4059,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
                return 0;
 
        if (adev->in_s0ix)
-               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
+               amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
 
        /* post card */
        if (amdgpu_device_need_post(adev)) {
@@ -4645,9 +4660,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 
        if (!r && amdgpu_ras_intr_triggered()) {
                list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-                       if (tmp_adev->mmhub.ras_funcs &&
-                           tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
-                               tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
+                       if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
+                           tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
+                               tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
                }
 
                amdgpu_ras_intr_cleared();
@@ -5726,3 +5741,36 @@ void amdgpu_device_halt(struct amdgpu_device *adev)
        pci_disable_device(pdev);
        pci_wait_for_pending_transaction(pdev);
 }
+
+u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+                               u32 reg)
+{
+       unsigned long flags, address, data;
+       u32 r;
+
+       address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(address, reg * 4);
+       (void)RREG32(address);
+       r = RREG32(data);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       return r;
+}
+
+void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+                               u32 reg, u32 v)
+{
+       unsigned long flags, address, data;
+
+       address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
+       data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       WREG32(address, reg * 4);
+       (void)RREG32(address);
+       WREG32(data, v);
+       (void)RREG32(data);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
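
A short usage note on the new helpers: each call takes pcie_idx_lock internally, so single reads and writes are atomic, but a read-modify-write sequence built from them is not. A hedged sketch:

/* Hypothetical RMW through the port helpers; some_bit is illustrative.
 * Each helper is individually locked, the sequence as a whole is not. */
u32 v = amdgpu_device_pcie_port_rreg(adev, reg);
v |= some_bit;
amdgpu_device_pcie_port_wreg(adev, reg, v);
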
index 81bfee978b74f592e23f70eec364b0dc14873f8b..cd7e8522c13045043a9f6d20d08c0d947688d0dd 100644 (file)
@@ -674,6 +674,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 2):
@@ -709,6 +710,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 2):
@@ -846,8 +848,14 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
 {
        if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
                amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+               return 0;
+       }
+
+       if (!amdgpu_device_has_dc_support(adev))
+               return 0;
+
 #if defined(CONFIG_DRM_AMD_DC)
-       } else if (adev->ip_versions[DCE_HWIP][0]) {
+       if (adev->ip_versions[DCE_HWIP][0]) {
                switch (adev->ip_versions[DCE_HWIP][0]) {
                case IP_VERSION(1, 0, 0):
                case IP_VERSION(1, 0, 1):
@@ -882,8 +890,8 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
                                adev->ip_versions[DCI_HWIP][0]);
                        return -EINVAL;
                }
-#endif
        }
+#endif
        return 0;
 }
 
@@ -904,6 +912,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 1):
@@ -1038,6 +1047,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 2):
@@ -1217,11 +1227,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
                        return -EINVAL;
 
                amdgpu_discovery_harvest_ip(adev);
-
-               if (!adev->mman.discovery_bin) {
-                       DRM_ERROR("ip discovery uninitialized\n");
-                       return -EINVAL;
-               }
                break;
        }
 
@@ -1242,6 +1247,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 4):
@@ -1258,6 +1264,20 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
+       switch (adev->ip_versions[GC_HWIP][0]) {
+       case IP_VERSION(9, 1, 0):
+       case IP_VERSION(9, 2, 2):
+       case IP_VERSION(9, 3, 0):
+       case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
+       case IP_VERSION(10, 3, 1):
+       case IP_VERSION(10, 3, 3):
+               adev->flags |= AMD_IS_APU;
+               break;
+       default:
+               break;
+       }
+
        if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
                adev->gmc.xgmi.supported = true;
 
index 3f21a13882a878d78dcdce5bea23a98deb68d504..ec4c9ef5f795982a5af7b513330523f99d40b708 100644 (file)
@@ -512,19 +512,24 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
                case CHIP_STONEY:
                        domain |= AMDGPU_GEM_DOMAIN_GTT;
                        break;
-               case CHIP_RAVEN:
-                       /* enable S/G on PCO and RV2 */
-                       if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
-                           (adev->apu_flags & AMD_APU_IS_PICASSO))
-                               domain |= AMDGPU_GEM_DOMAIN_GTT;
-                       break;
-               case CHIP_RENOIR:
-               case CHIP_VANGOGH:
-               case CHIP_YELLOW_CARP:
-                       domain |= AMDGPU_GEM_DOMAIN_GTT;
-                       break;
-
                default:
+                       switch (adev->ip_versions[DCE_HWIP][0]) {
+                       case IP_VERSION(1, 0, 0):
+                       case IP_VERSION(1, 0, 1):
+                               /* enable S/G on PCO and RV2 */
+                               if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+                                   (adev->apu_flags & AMD_APU_IS_PICASSO))
+                                       domain |= AMDGPU_GEM_DOMAIN_GTT;
+                               break;
+                       case IP_VERSION(2, 1, 0):
+                       case IP_VERSION(3, 0, 1):
+                       case IP_VERSION(3, 1, 2):
+                       case IP_VERSION(3, 1, 3):
+                               domain |= AMDGPU_GEM_DOMAIN_GTT;
+                               break;
+                       default:
+                               break;
+                       }
                        break;
                }
        }
index 4c83f1db8a244427ae65c23bc8af6f9b31a7fdfc..5cdafdcfec59ca5cf1ba9e52496f60fbde66de7f 100644 (file)
  * - 3.42.0 - Add 16bpc fixed point display support
  * - 3.43.0 - Add device hot plug/unplug support
  * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
+ * - 3.45.0 - Add context ioctl stable pstate interface
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       44
+#define KMS_DRIVER_MINOR       45
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit;
@@ -174,7 +175,6 @@ int amdgpu_mes;
 int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = -1; /* auto */
-uint amdgpu_freesync_vid_mode;
 int amdgpu_reset_method = -1; /* auto */
 int amdgpu_num_kcq = -1;
 int amdgpu_smartshift_bias;
@@ -843,32 +843,6 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
 MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
 module_param_named(tmz, amdgpu_tmz, int, 0444);
 
-/**
- * DOC: freesync_video (uint)
- * Enable the optimization to adjust front porch timing to achieve seamless
- * mode change experience when setting a freesync supported mode for which full
- * modeset is not needed.
- *
- * The Display Core will add a set of modes derived from the base FreeSync
- * video mode into the corresponding connector's mode list based on commonly
- * used refresh rates and VRR range of the connected display, when users enable
- * this feature. From the userspace perspective, they can see a seamless mode
- * change experience when the change between different refresh rates under the
- * same resolution. Additionally, userspace applications such as Video playback
- * can read this modeset list and change the refresh rate based on the video
- * frame rate. Finally, the userspace can also derive an appropriate mode for a
- * particular refresh rate based on the FreeSync Mode and add it to the
- * connector's mode list.
- *
- * Note: This is an experimental feature.
- *
- * The default value: 0 (off).
- */
-MODULE_PARM_DESC(
-       freesync_video,
-       "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
-module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
-
 /**
  * DOC: reset_method (int)
  * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
@@ -1942,10 +1916,10 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
 
        /* Aldebaran */
-       {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+       {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+       {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+       {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
 
        /* CYAN_SKILLFISH */
        {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -1994,6 +1968,28 @@ static bool amdgpu_is_fw_framebuffer(resource_size_t base,
        return found;
 }
 
+static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
+{
+       struct pci_dev *p = NULL;
+       int i;
+
+       /* 0 - GPU
+        * 1 - audio
+        * 2 - USB
+        * 3 - UCSI
+        */
+       for (i = 1; i < 4; i++) {
+               p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+                                               adev->pdev->bus->number, i);
+               if (p) {
+                       pm_runtime_get_sync(&p->dev);
+                       pm_runtime_mark_last_busy(&p->dev);
+                       pm_runtime_put_autosuspend(&p->dev);
+                       pci_dev_put(p);
+               }
+       }
+}
+
 static int amdgpu_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
 {
@@ -2126,6 +2122,48 @@ retry_init:
        if (ret)
                DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
 
+       if (adev->runpm) {
+               /* only need to skip on ATPX */
+               if (amdgpu_device_supports_px(ddev))
+                       dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+               /* we want direct complete for BOCO */
+               if (amdgpu_device_supports_boco(ddev))
+                       dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
+                                               DPM_FLAG_SMART_SUSPEND |
+                                               DPM_FLAG_MAY_SKIP_RESUME);
+               pm_runtime_use_autosuspend(ddev->dev);
+               pm_runtime_set_autosuspend_delay(ddev->dev, 5000);
+
+               pm_runtime_allow(ddev->dev);
+
+               pm_runtime_mark_last_busy(ddev->dev);
+               pm_runtime_put_autosuspend(ddev->dev);
+
+               /*
+                * For runpm implemented via BACO, PMFW will handle the
+                * timing for BACO in and out:
+                *   - put ASIC into BACO state only when both video and
+                *     audio functions are in D3 state.
+                *   - pull ASIC out of BACO state when either video or
+                *     audio function is in D0 state.
+                * Also, at startup, PMFW assumes both functions are in
+                * D0 state.
+                *
+                * So if the snd driver was loaded before the amdgpu driver
+                * and the audio function was already put into D3 state, there
+                * will be no PMFW-aware D-state transition (D0->D3) on runpm
+                * suspend, and BACO will not be kicked in correctly.
+                *
+                * Via amdgpu_get_secondary_funcs(), the audio device is put
+                * back into D0 state, so a PMFW-aware D-state transition
+                * (D0->D3) happens on the next runpm suspend.
+                */
+               if (amdgpu_device_supports_baco(ddev) &&
+                   !(adev->flags & AMD_IS_APU) &&
+                   (adev->asic_type >= CHIP_NAVI10))
+                       amdgpu_get_secondary_funcs(adev);
+       }
+
        return 0;
 
 err_pci:
@@ -2137,8 +2175,15 @@ static void
 amdgpu_pci_remove(struct pci_dev *pdev)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
+       struct amdgpu_device *adev = drm_to_adev(dev);
 
        drm_dev_unplug(dev);
+
+       if (adev->runpm) {
+               pm_runtime_get_sync(dev->dev);
+               pm_runtime_forbid(dev->dev);
+       }
+
        amdgpu_driver_unload_kms(dev);
 
        /*
@@ -2246,13 +2291,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
 static int amdgpu_pmops_prepare(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
 
        /* Return a positive number here so
         * DPM_FLAG_SMART_SUSPEND works properly
         */
        if (amdgpu_device_supports_boco(drm_dev))
-               return pm_runtime_suspended(dev) &&
-                       pm_suspend_via_firmware();
+               return pm_runtime_suspended(dev);
+
+       /* If the device supports neither S3 nor S2idle,
+        * skip suspend entirely.
+        */
+       if (!amdgpu_acpi_is_s0ix_active(adev) &&
+           !amdgpu_acpi_is_s3_active(adev))
+               return 1;
 
        return 0;
 }
index 2a786e78862777e41399cd5d6dc9a08fc7583081..ecada5eadfe3553b79ef1fd144d3e7a7abd01607 100644 (file)
@@ -30,7 +30,6 @@
 #include "amdgpu_eeprom.h"
 
 #define FRU_EEPROM_MADDR        0x60000
-#define I2C_PRODUCT_INFO_OFFSET 0xC0
 
 static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 {
@@ -40,7 +39,13 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
         */
        struct atom_context *atom_ctx = adev->mode_info.atom_context;
 
-       /* VBIOS is of the format ###-DXXXYY-##. For SKU identification,
+       /* The i2c access is blocked on VF
+        * TODO: Need other way to get the info
+        */
+       if (amdgpu_sriov_vf(adev))
+               return false;
+
+       /* VBIOS is of the format ###-DXXXYYYY-##. For SKU identification,
         * we can use just the "DXXX" portion. If there were more models, we
         * could convert the 3 characters to a hex integer and use a switch
         * for ease/speed/readability. For now, 2 string comparisons are
@@ -59,17 +64,24 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
        case CHIP_ALDEBARAN:
                /* All Aldebaran SKUs have the FRU */
                return true;
+       case CHIP_SIENNA_CICHLID:
+               if (strnstr(atom_ctx->vbios_version, "D603",
+                           sizeof(atom_ctx->vbios_version)))
+                       return true;
+               else
+                       return false;
        default:
                return false;
        }
 }
 
 static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
-                                 unsigned char *buff)
+                                 unsigned char *buf, size_t buf_size)
 {
-       int ret, size;
+       int ret;
+       u8 size;
 
-       ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1);
+       ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr, buf, 1);
        if (ret < 1) {
                DRM_WARN("FRU: Failed to get size field");
                return ret;
@@ -78,9 +90,11 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
        /* The size byte encodes the field length in its low 6 bits; the top
         * two bits (0xC0) are the FRU type/length header, so mask them off.
         */
-       size = buff[0] - I2C_PRODUCT_INFO_OFFSET;
+       size = buf[0] & 0x3F;
+       size = min_t(size_t, size, buf_size);
 
-       ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size);
+       ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr + 1,
+                                buf, size);
        if (ret < 1) {
                DRM_WARN("FRU: Failed to get data field");
                return ret;
@@ -91,19 +105,15 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
-       unsigned char buff[AMDGPU_PRODUCT_NAME_LEN+2];
+       unsigned char buf[AMDGPU_PRODUCT_NAME_LEN];
        u32 addrptr;
        int size, len;
-       int offset = 2;
 
        if (!is_fru_eeprom_supported(adev))
                return 0;
 
-       if (adev->asic_type == CHIP_ALDEBARAN)
-               offset = 0;
-
        /* If algo exists, it means that the i2c_adapter's initialized */
-       if (!adev->pm.smu_i2c.algo) {
+       if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
                DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
                return -ENODEV;
        }
@@ -121,7 +131,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
         * and the language field, so just start from 0xb, manufacturer size
         */
        addrptr = FRU_EEPROM_MADDR + 0xb;
-       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
        if (size < 1) {
                DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
                return -EINVAL;
@@ -131,7 +141,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
         * size field being 1 byte. This pattern continues below.
         */
        addrptr += size + 1;
-       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product name, ret:%d", size);
                return -EINVAL;
@@ -143,12 +153,11 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
                                AMDGPU_PRODUCT_NAME_LEN);
                len = AMDGPU_PRODUCT_NAME_LEN - 1;
        }
-       /* Start at 2 due to buff using fields 0 and 1 for the address */
-       memcpy(adev->product_name, &buff[offset], len);
+       memcpy(adev->product_name, buf, len);
        adev->product_name[len] = '\0';
 
        addrptr += size + 1;
-       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product number, ret:%d", size);
                return -EINVAL;
@@ -162,11 +171,11 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
                DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
                len = sizeof(adev->product_number) - 1;
        }
-       memcpy(adev->product_number, &buff[offset], len);
+       memcpy(adev->product_number, buf, len);
        adev->product_number[len] = '\0';
 
        addrptr += size + 1;
-       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product version, ret:%d", size);
@@ -174,7 +183,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        }
 
        addrptr += size + 1;
-       size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+       size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
@@ -189,7 +198,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
                DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
                len = sizeof(adev->serial) - 1;
        }
-       memcpy(adev->serial, &buff[offset], len);
+       memcpy(adev->serial, buf, len);
        adev->serial[len] = '\0';
 
        return 0;
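
The walk above relies on the FRU layout being a sequence of fields, each prefixed by a type/length byte; a sketch of the iteration contract, with the loop shape assumed:

/* Each field is <type/length byte><data...>; amdgpu_fru_read_eeprom()
 * returns the decoded data length, so the next field begins at
 * addrptr + size + 1 (the +1 skips the length byte itself). */
addrptr = FRU_EEPROM_MADDR + 0xb;       /* first field: manufacturer */
while (more_fields_wanted) {            /* condition is illustrative */
        size = amdgpu_fru_read_eeprom(adev, addrptr, buf, sizeof(buf));
        if (size < 1)
                return -EINVAL;
        /* consume buf[0..size) for this field */
        addrptr += size + 1;
}
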
index 645950a653a0c3a7e70c4f0f91f4fb90f222959e..01cb89ffbd566a5128dff3780c3f0017376571da 100644 (file)
@@ -150,7 +150,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * replaces them with the dummy page (all asics).
  */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages)
 {
        unsigned t;
@@ -161,13 +161,11 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
        uint64_t flags = 0;
        int idx;
 
-       if (!adev->gart.ready) {
-               WARN(1, "trying to unbind memory from uninitialized GART !\n");
-               return -EINVAL;
-       }
+       if (!adev->gart.ptr)
+               return;
 
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
-               return 0;
+               return;
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
@@ -188,7 +186,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
        drm_dev_exit(idx);
-       return 0;
 }
 
 /**
@@ -204,7 +201,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
  * Map the dma_addresses into GART entries (all asics).
  */
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                    int pages, dma_addr_t *dma_addr, uint64_t flags,
                    void *dst)
 {
@@ -212,13 +209,8 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
        unsigned i, j, t;
        int idx;
 
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
-               return 0;
+               return;
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
 
@@ -230,7 +222,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                }
        }
        drm_dev_exit(idx);
-       return 0;
 }
 
 /**
@@ -246,20 +237,14 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
  * (all asics).
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, dma_addr_t *dma_addr,
                     uint64_t flags)
 {
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
        if (!adev->gart.ptr)
-               return 0;
+               return;
 
-       return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-                              adev->gart.ptr);
+       amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
 }
 
 /**
@@ -274,6 +259,9 @@ void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
 {
        int i;
 
+       if (!adev->gart.ptr)
+               return;
+
        mb();
        amdgpu_device_flush_hdp(adev, NULL);
        for (i = 0; i < adev->num_vmhubs; i++)
index 78895413cf9fe35433d16b05cc6b921da10988c4..8fea3e04e4110696983ec6f4911424060300cea6 100644 (file)
@@ -46,7 +46,6 @@ struct amdgpu_gart {
        unsigned                        num_gpu_pages;
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
-       bool                            ready;
 
        /* Asic default pte flags */
        uint64_t                        gart_pte_flags;
@@ -58,12 +57,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-                      int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
-                   int pages, dma_addr_t *dma_addr, uint64_t flags,
-                   void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-                    int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+                       int pages);
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+                    int pages, dma_addr_t *dma_addr, uint64_t flags,
+                    void *dst);
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+                     int pages, dma_addr_t *dma_addr, uint64_t flags);
 void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
 #endif
index 9b12cab5e60676ddf3a3d73c5f5f8c3b2259bdec..57b74d35052fbf067c0ddffae523f32d30bb3ee3 100644 (file)
@@ -222,7 +222,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
        if (!bo_va || --bo_va->ref_count)
                goto out_unlock;
 
-       amdgpu_vm_bo_rmv(adev, bo_va);
+       amdgpu_vm_bo_del(adev, bo_va);
        if (!amdgpu_vm_ready(vm))
                goto out_unlock;
 
index 1916ec84dd71f8a4bd787d13322165aadc71a023..43004822ec6ffee8651ed241b2d5894035d6169d 100644 (file)
@@ -615,14 +615,14 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
 
        mutex_lock(&adev->gfx.gfx_off_mutex);
 
-       r = smu_get_status_gfxoff(adev, value);
+       r = amdgpu_dpm_get_status_gfxoff(adev, value);
 
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 
        return r;
 }
 
-int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        int r;
        struct ras_fs_if fs_info = {
@@ -695,9 +695,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
         */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
                kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_count)
-                       adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
+               if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
+                   adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
+                       adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
                amdgpu_ras_reset_gpu(adev);
        }
        return AMDGPU_RAS_SUCCESS;
@@ -852,19 +852,3 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
        }
        return amdgpu_num_kcq;
 }
-
-/* amdgpu_gfx_state_change_set - Handle gfx power state change set
- * @adev: amdgpu_device pointer
- * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry)
- *
- */
-
-void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
-{
-       mutex_lock(&adev->pm.mutex);
-       if (adev->powerplay.pp_funcs &&
-           adev->powerplay.pp_funcs->gfx_state_change_set)
-               ((adev)->powerplay.pp_funcs->gfx_state_change_set(
-                       (adev)->powerplay.pp_handle, state));
-       mutex_unlock(&adev->pm.mutex);
-}
index f851196c83a55a4f9f4512b32094af2e64ebf073..f99eac544f6d36c0306187228cddd8b3eaead775 100644 (file)
@@ -31,6 +31,7 @@
 #include "amdgpu_ring.h"
 #include "amdgpu_rlc.h"
 #include "soc15.h"
+#include "amdgpu_ras.h"
 
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE                 0x00000000L
@@ -47,12 +48,6 @@ enum amdgpu_gfx_pipe_priority {
        AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
 };
 
-/* Argument for PPSMC_MSG_GpuChangeState */
-enum gfx_change_state {
-       sGpuChangeState_D0Entry = 1,
-       sGpuChangeState_D3Entry,
-};
-
 #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM  0
 #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM  15
 
@@ -204,16 +199,8 @@ struct amdgpu_cu_info {
        uint32_t bitmap[4][4];
 };
 
-struct amdgpu_gfx_ras_funcs {
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       int (*ras_error_inject)(struct amdgpu_device *adev,
-                               void *inject_if);
-       int (*query_ras_error_count)(struct amdgpu_device *adev,
-                                    void *ras_error_status);
-       void (*reset_ras_error_count)(struct amdgpu_device *adev);
-       void (*query_ras_error_status)(struct amdgpu_device *adev);
-       void (*reset_ras_error_status)(struct amdgpu_device *adev);
+struct amdgpu_gfx_ras {
+       struct amdgpu_ras_block_object  ras_block;
        void (*enable_watchdog_timer)(struct amdgpu_device *adev);
 };
 
@@ -337,7 +324,7 @@ struct amdgpu_gfx {
 
        /*ras */
        struct ras_common_if                    *ras_if;
-       const struct amdgpu_gfx_ras_funcs       *ras_funcs;
+       struct amdgpu_gfx_ras   *ras;
 };
 
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
@@ -399,7 +386,7 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
                                    int pipe, int queue);
 void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
 int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
-int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info);
 void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
                void *err_data,
@@ -410,5 +397,4 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
 uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
-void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state);
 #endif
index 2430d6223c2d732449343a0e4c8dfccbccf06509..ec1203d4979916fab49022cb7577eb428da01a17 100644 (file)
@@ -436,58 +436,58 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
        } while (fault->timestamp < tmp);
 }
 
+int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+{
+       if (!adev->gmc.xgmi.connected_to_cpu) {
+               adev->gmc.xgmi.ras = &xgmi_ras;
+               amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
+       }
+
+       return 0;
+}
+
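
This shows the shape of the new RAS object model: an IP block embeds a struct amdgpu_ras_block_object and registers it once, and common code then walks the registered blocks instead of per-IP ras_funcs tables. A hedged sketch of a block definition; the instance and the exact hw_ops wiring are assumptions based on the call sites in this diff:

/* Sketch only: a RAS block bundles lifecycle hooks and hw_ops. */
static struct amdgpu_gfx_ras gfx_ras_sketch = {
        .ras_block = {
                .ras_late_init = amdgpu_gfx_ras_late_init, /* (adev, ras_info) */
                /* .ras_fini and .hw_ops (query/reset_ras_error_count, ...)
                 * filled in per IP */
        },
};

/* Registered once during early init, mirroring the xgmi case above: */
amdgpu_ras_register_ras_block(adev, &gfx_ras_sketch.ras_block);
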
 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->umc.ras_funcs &&
-           adev->umc.ras_funcs->ras_late_init) {
-               r = adev->umc.ras_funcs->ras_late_init(adev);
+       if (adev->umc.ras && adev->umc.ras->ras_block.ras_late_init) {
+               r = adev->umc.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (adev->mmhub.ras_funcs &&
-           adev->mmhub.ras_funcs->ras_late_init) {
-               r = adev->mmhub.ras_funcs->ras_late_init(adev);
+       if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_late_init) {
+               r = adev->mmhub.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (!adev->gmc.xgmi.connected_to_cpu)
-               adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
-
-       if (adev->gmc.xgmi.ras_funcs &&
-           adev->gmc.xgmi.ras_funcs->ras_late_init) {
-               r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
+       if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_late_init) {
+               r = adev->gmc.xgmi.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (adev->hdp.ras_funcs &&
-           adev->hdp.ras_funcs->ras_late_init) {
-               r = adev->hdp.ras_funcs->ras_late_init(adev);
+       if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_late_init) {
+               r = adev->hdp.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (adev->mca.mp0.ras_funcs &&
-           adev->mca.mp0.ras_funcs->ras_late_init) {
-               r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
+       if (adev->mca.mp0.ras && adev->mca.mp0.ras->ras_block.ras_late_init) {
+               r = adev->mca.mp0.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (adev->mca.mp1.ras_funcs &&
-           adev->mca.mp1.ras_funcs->ras_late_init) {
-               r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
+       if (adev->mca.mp1.ras && adev->mca.mp1.ras->ras_block.ras_late_init) {
+               r = adev->mca.mp1.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (adev->mca.mpio.ras_funcs &&
-           adev->mca.mpio.ras_funcs->ras_late_init) {
-               r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
+       if (adev->mca.mpio.ras && adev->mca.mpio.ras->ras_block.ras_late_init) {
+               r = adev->mca.mpio.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
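
The same NULL-checked dispatch now repeats for every IP block. A minimal sketch of how it could be factored with the new interface (the helper is hypothetical and not part of this patch; struct amdgpu_ras_block_object comes from the amdgpu_ras.h changes later in this series):

	static int ras_block_late_init_if_present(struct amdgpu_device *adev,
						  struct amdgpu_ras_block_object *obj)
	{
		/* A block without a late-init hook is not an error. */
		if (obj && obj->ras_late_init)
			return obj->ras_late_init(adev, NULL);

		return 0;
	}
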
@@ -497,21 +497,17 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 {
-       if (adev->umc.ras_funcs &&
-           adev->umc.ras_funcs->ras_fini)
-               adev->umc.ras_funcs->ras_fini(adev);
+       if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini)
+               adev->umc.ras->ras_block.ras_fini(adev);
 
-       if (adev->mmhub.ras_funcs &&
-           adev->mmhub.ras_funcs->ras_fini)
-               adev->mmhub.ras_funcs->ras_fini(adev);
+       if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini)
+               adev->mmhub.ras->ras_block.ras_fini(adev);
 
-       if (adev->gmc.xgmi.ras_funcs &&
-           adev->gmc.xgmi.ras_funcs->ras_fini)
-               adev->gmc.xgmi.ras_funcs->ras_fini(adev);
+       if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini)
+               adev->gmc.xgmi.ras->ras_block.ras_fini(adev);
 
-       if (adev->hdp.ras_funcs &&
-           adev->hdp.ras_funcs->ras_fini)
-               adev->hdp.ras_funcs->ras_fini(adev);
+       if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_fini)
+               adev->hdp.ras->ras_block.ras_fini(adev);
 }
 
        /*
@@ -831,3 +827,49 @@ void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev)
                break;
        }
 }
+
+int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
+{
+       struct amdgpu_bo *vram_bo = NULL;
+       uint64_t vram_gpu = 0;
+       void *vram_ptr = NULL;
+
+       int ret, size = 0x100000;
+       uint8_t cptr[10];
+
+       ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_VRAM,
+                               &vram_bo,
+                               &vram_gpu,
+                               &vram_ptr);
+       if (ret)
+               return ret;
+
+       memset(vram_ptr, 0x86, size);
+       memset(cptr, 0x86, 10);
+
+       /*
+        * Check the start, the middle, and the end of the memory to see
+        * whether each sampled byte still holds the 0x86 pattern. If so,
+        * assume the VRAM BO is usable.
+        *
+        * Note: checking every byte of the whole 1M BO would cost too
+        * many seconds, so just sample three regions here, which is
+        * good enough for the emulation use case.
+        */
+       ret = memcmp(vram_ptr, cptr, 10);
+       if (ret)
+               goto release_buffer;
+
+       ret = memcmp(vram_ptr + (size / 2), cptr, 10);
+       if (ret)
+               goto release_buffer;
+
+       ret = memcmp(vram_ptr + size - 10, cptr, 10);
+
+release_buffer:
+       /* Free the test BO on every path; returning early above would
+        * otherwise leak the 1M allocation. */
+       amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
+                       &vram_ptr);
+
+       return ret;
+}
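
A sketch of a possible call site (assumed, not shown in this patch): the check is intended to run once after VRAM is brought up, so a bad board fails early.

	int r = amdgpu_gmc_vram_checking(adev);	/* hypothetical caller */

	if (r)
		dev_err(adev->dev, "VRAM sampling check failed (%d)\n", r);
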
index 8458cebc6d5b83639431ef5dbb8a349bc9494111..93505bb0a36c3ca53022481ca5e55fae6c803802 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/types.h>
 
 #include "amdgpu_irq.h"
+#include "amdgpu_ras.h"
 
 /* VA hole for 48bit addresses on Vega10 */
 #define AMDGPU_GMC_HOLE_START  0x0000800000000000ULL
@@ -135,12 +136,8 @@ struct amdgpu_gmc_funcs {
        unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
 };
 
-struct amdgpu_xgmi_ras_funcs {
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       int (*query_ras_error_count)(struct amdgpu_device *adev,
-                                    void *ras_error_status);
-       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+struct amdgpu_xgmi_ras {
+       struct amdgpu_ras_block_object ras_block;
 };
 
 struct amdgpu_xgmi {
@@ -159,7 +156,7 @@ struct amdgpu_xgmi {
        struct ras_common_if *ras_if;
        bool connected_to_cpu;
        bool pending_reset;
-       const struct amdgpu_xgmi_ras_funcs *ras_funcs;
+       struct amdgpu_xgmi_ras *ras;
 };
 
 struct amdgpu_gmc {
@@ -321,6 +318,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
                              uint16_t pasid, uint64_t timestamp);
 void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
                                     uint16_t pasid);
+int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
@@ -339,4 +337,5 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
 uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
 uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
 uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
 #endif
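
A sketch of how an IP-side object now wraps the common block object instead of carrying a per-IP vtable; the field values are illustrative and the init/fini helper names are assumed:

	struct amdgpu_xgmi_ras xgmi_ras = {
		.ras_block = {
			.name = "xgmi_wafl",		/* illustrative */
			.block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
			.ras_late_init = amdgpu_xgmi_ras_late_init,	/* assumed name */
			.ras_fini = amdgpu_xgmi_ras_fini,		/* assumed name */
		},
	};
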
index e0c7fbe01d93939d3df1b81dcde1cd7e4c00c420..899a47011a6796d45c8dd6b3724758430b05cce7 100644 (file)
@@ -222,26 +222,21 @@ uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
  *
  * Re-init the gart for each known BO in the GTT.
  */
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
 {
        struct amdgpu_gtt_node *node;
        struct drm_mm_node *mm_node;
        struct amdgpu_device *adev;
-       int r = 0;
 
        adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
                node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-               r = amdgpu_ttm_recover_gart(node->tbo);
-               if (r)
-                       break;
+               amdgpu_ttm_recover_gart(node->tbo);
        }
        spin_unlock(&mgr->lock);
 
        amdgpu_gart_invalidate_tlb(adev);
-
-       return r;
 }
 
 /**
index a766e1aad2b913183ad4c211b57e28661ccf7d38..518966a2613087649a49b1fa88f9b96519602394 100644 (file)
@@ -24,7 +24,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        int r;
        struct ras_ih_if ih_info = {
index 7ec99d591584b0137e1a5fa8ad4f99c9d1905720..4af2c2a322e7d5c7597e0ce289ccafc618db7fd8 100644 (file)
  */
 #ifndef __AMDGPU_HDP_H__
 #define __AMDGPU_HDP_H__
+#include "amdgpu_ras.h"
 
-struct amdgpu_hdp_ras_funcs {
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+struct amdgpu_hdp_ras {
+       struct amdgpu_ras_block_object ras_block;
 };
 
 struct amdgpu_hdp_funcs {
@@ -43,9 +40,9 @@ struct amdgpu_hdp_funcs {
 struct amdgpu_hdp {
        struct ras_common_if                    *ras_if;
        const struct amdgpu_hdp_funcs           *funcs;
-       const struct amdgpu_hdp_ras_funcs       *ras_funcs;
+       struct amdgpu_hdp_ras   *ras;
 };
 
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void *ras_info);
 void amdgpu_hdp_ras_fini(struct amdgpu_device *adev);
 #endif /* __AMDGPU_HDP_H__ */
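
Hardware access routines move into a shared hw_ops table (declared in the amdgpu_ras.h hunk further down). A hedged sketch of wiring it up for HDP; the hdp_v4_0_* symbol names are assumed:

	static const struct amdgpu_ras_block_hw_ops hdp_ras_hw_ops = {
		.query_ras_error_count = hdp_v4_0_query_ras_error_count, /* assumed */
		.reset_ras_error_count = hdp_v4_0_reset_ras_error_count, /* assumed */
	};

	struct amdgpu_hdp_ras hdp_ras = {	/* assumed name */
		.ras_block = {
			.block = AMDGPU_RAS_BLOCK__HDP,
			.hw_ops = &hdp_ras_hw_ops,
			.ras_late_init = amdgpu_hdp_ras_late_init,
			.ras_fini = amdgpu_hdp_ras_fini,
		},
	};
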
index f5cbc2747ac6d27caa6529368f56b1e96b083978..ea3e8c66211fd31ef66afa3185861a24de6faa3b 100644 (file)
@@ -199,13 +199,13 @@ static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
         * ack the interrupt if it is there
         */
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
-               if (adev->nbio.ras_funcs &&
-                   adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
-                       adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
+               if (adev->nbio.ras &&
+                   adev->nbio.ras->handle_ras_controller_intr_no_bifring)
+                       adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
 
-               if (adev->nbio.ras_funcs &&
-                   adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
-                       adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+               if (adev->nbio.ras &&
+                   adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
+                       adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
        }
 
        return ret;
index 1ebb91db22743badd64deaae581f2a9227f295bf..9f985bd463be2ee83ee7e562af7a43949a68d342 100644 (file)
@@ -87,11 +87,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
        if (adev->rmmio == NULL)
                return;
 
-       if (adev->runpm) {
-               pm_runtime_get_sync(dev->dev);
-               pm_runtime_forbid(dev->dev);
-       }
-
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
                DRM_WARN("smart shift update failed\n");
 
@@ -124,22 +119,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
        mutex_unlock(&mgpu_info.mutex);
 }
 
-static void amdgpu_get_audio_func(struct amdgpu_device *adev)
-{
-       struct pci_dev *p = NULL;
-
-       p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
-                       adev->pdev->bus->number, 1);
-       if (p) {
-               pm_runtime_get_sync(&p->dev);
-
-               pm_runtime_mark_last_busy(&p->dev);
-               pm_runtime_put_autosuspend(&p->dev);
-
-               pci_dev_put(p);
-       }
-}
-
 /**
  * amdgpu_driver_load_kms - Main load function for KMS.
  *
@@ -152,21 +131,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev)
 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 {
        struct drm_device *dev;
-       struct pci_dev *parent;
        int r, acpi_status;
 
        dev = adev_to_drm(adev);
 
-       if (amdgpu_has_atpx() &&
-           (amdgpu_is_atpx_hybrid() ||
-            amdgpu_has_atpx_dgpu_power_cntl()) &&
-           ((flags & AMD_IS_APU) == 0) &&
-           !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
-               flags |= AMD_IS_PX;
-
-       parent = pci_upstream_bridge(adev->pdev);
-       adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
-
        /* amdgpu_device_init should report only fatal error
         * like memory allocation failure or iomapping failure,
         * or memory manager initialization failure, it must
@@ -224,58 +192,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
        if (acpi_status)
                dev_dbg(dev->dev, "Error during ACPI methods call\n");
 
-       if (adev->runpm) {
-               /* only need to skip on ATPX */
-               if (amdgpu_device_supports_px(dev))
-                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
-               /* we want direct complete for BOCO */
-               if (amdgpu_device_supports_boco(dev))
-                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
-                                               DPM_FLAG_SMART_SUSPEND |
-                                               DPM_FLAG_MAY_SKIP_RESUME);
-               pm_runtime_use_autosuspend(dev->dev);
-               pm_runtime_set_autosuspend_delay(dev->dev, 5000);
-
-               pm_runtime_allow(dev->dev);
-
-               pm_runtime_mark_last_busy(dev->dev);
-               pm_runtime_put_autosuspend(dev->dev);
-
-               /*
-                * For runpm implemented via BACO, PMFW will handle the
-                * timing for BACO in and out:
-                *   - put ASIC into BACO state only when both video and
-                *     audio functions are in D3 state.
-                *   - pull ASIC out of BACO state when either video or
-                *     audio function is in D0 state.
-                * Also, at startup, PMFW assumes both functions are in
-                * D0 state.
-                *
-                * So if snd driver was loaded prior to amdgpu driver
-                * and audio function was put into D3 state, there will
-                * be no PMFW-aware D-state transition(D0->D3) on runpm
-                * suspend. Thus the BACO will be not correctly kicked in.
-                *
-                * Via amdgpu_get_audio_func(), the audio dev is put
-                * into D0 state. Then there will be a PMFW-aware D-state
-                * transition(D0->D3) on runpm suspend.
-                */
-               if (amdgpu_device_supports_baco(dev) &&
-                   !(adev->flags & AMD_IS_APU) &&
-                   (adev->asic_type >= CHIP_NAVI10))
-                       amdgpu_get_audio_func(adev);
-       }
-
        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
                DRM_WARN("smart shift update failed\n");
 
 out:
-       if (r) {
-               /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
-               if (adev->rmmio && adev->runpm)
-                       pm_runtime_put_noidle(dev->dev);
+       if (r)
                amdgpu_driver_unload_kms(dev);
-       }
 
        return r;
 }
@@ -406,6 +328,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                fw_info->ver = adev->psp.toc.fw_version;
                fw_info->feature = adev->psp.toc.feature_version;
                break;
+       case AMDGPU_INFO_FW_CAP:
+               fw_info->ver = adev->psp.cap_fw_version;
+               fw_info->feature = adev->psp.cap_feature_version;
+               break;
        default:
                return -EINVAL;
        }
@@ -1268,18 +1194,20 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
        if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
                amdgpu_vce_free_handles(adev, file_priv);
 
-       amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
-
        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
-               amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+               amdgpu_vm_bo_del(adev, fpriv->csa_va);
                fpriv->csa_va = NULL;
                amdgpu_bo_unreserve(adev->virt.csa_obj);
        }
 
        pasid = fpriv->vm.pasid;
        pd = amdgpu_bo_ref(fpriv->vm.root.bo);
+       if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
+               amdgpu_vm_bo_del(adev, fpriv->prt_va);
+               amdgpu_bo_unreserve(pd);
+       }
 
        amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
        amdgpu_vm_fini(adev, &fpriv->vm);
@@ -1427,8 +1355,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
        struct drm_amdgpu_info_firmware fw_info;
        struct drm_amdgpu_query_fw query_fw;
        struct atom_context *ctx = adev->mode_info.atom_context;
-       uint8_t smu_minor, smu_debug;
-       uint16_t smu_major;
+       uint8_t smu_program, smu_major, smu_minor, smu_debug;
        int ret, i;
 
        static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
@@ -1574,11 +1501,12 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
        ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
        if (ret)
                return ret;
-       smu_major = (fw_info.ver >> 16) & 0xffff;
+       smu_program = (fw_info.ver >> 24) & 0xff;
+       smu_major = (fw_info.ver >> 16) & 0xff;
        smu_minor = (fw_info.ver >> 8) & 0xff;
        smu_debug = (fw_info.ver >> 0) & 0xff;
-       seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n",
-                  fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug);
+       seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
+                  fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);
 
        /* SDMA */
        query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
@@ -1623,6 +1551,16 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
        seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
                   fw_info.feature, fw_info.ver);
 
+       /* CAP */
+       if (adev->psp.cap_fw) {
+               query_fw.fw_type = AMDGPU_INFO_FW_CAP;
+               ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+               if (ret)
+                       return ret;
+               seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
+                               fw_info.feature, fw_info.ver);
+       }
+
        seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
 
        return 0;
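
The SMU version word is now decoded as four byte-wide fields (program.major.minor.debug). A small sketch of the unpacking, matching the shifts used above (the helper itself is hypothetical):

	static inline void smu_fw_ver_unpack(uint32_t ver, uint8_t *program,
					     uint8_t *major, uint8_t *minor,
					     uint8_t *debug)
	{
		*program = (ver >> 24) & 0xff;
		*major   = (ver >> 16) & 0xff;
		*minor   = (ver >>  8) & 0xff;
		*debug   = ver & 0xff;
	}
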
index ce538f4819f925c1b9c5afffc591c21c6c14e0bd..52a60c2316a2ef363629e4e41ac4026fb0405b83 100644 (file)
@@ -74,20 +74,23 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
 int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
                             struct amdgpu_mca_ras *mca_dev)
 {
+       char sysfs_name[32] = {0};
        int r;
        struct ras_ih_if ih_info = {
                .cb = NULL,
        };
-       struct ras_fs_if fs_info = {
-               .sysfs_name = mca_dev->ras_funcs->sysfs_name,
+       struct ras_fs_if fs_info = {
+               .sysfs_name = sysfs_name,
        };
 
+       snprintf(sysfs_name, sizeof(sysfs_name), "%s_err_count", mca_dev->ras->ras_block.name);
+
        if (!mca_dev->ras_if) {
                mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
                if (!mca_dev->ras_if)
                        return -ENOMEM;
-               mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
-               mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block;
+               mca_dev->ras_if->block = mca_dev->ras->ras_block.block;
+               mca_dev->ras_if->sub_block_index = mca_dev->ras->ras_block.sub_block_index;
                mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        }
        ih_info.head = fs_info.head = *mca_dev->ras_if;
index c74bc7177066ec534015703dd13d3c38b3303551..be030c4031d294d3ec906e4b0c109ee006cb576c 100644 (file)
 #ifndef __AMDGPU_MCA_H__
 #define __AMDGPU_MCA_H__
 
-struct amdgpu_mca_ras_funcs {
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-       void (*query_ras_error_address)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
-       uint32_t ras_block;
-       uint32_t ras_sub_block;
-       const char* sysfs_name;
+struct amdgpu_mca_ras_block {
+       struct amdgpu_ras_block_object ras_block;
 };
 
 struct amdgpu_mca_ras {
        struct ras_common_if *ras_if;
-       const struct amdgpu_mca_ras_funcs *ras_funcs;
+       struct amdgpu_mca_ras_block *ras;
 };
 
 struct amdgpu_mca_funcs {
index 24297dc51434b2a07ce1b1f90cec1261449ecad7..f9b5472a75d70307d46e5b49c0138831a7c4ec23 100644 (file)
@@ -24,7 +24,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 
-int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        int r;
        struct ras_ih_if ih_info = {
index b27fcbccce2b6f27dcb70ec99527323db075863d..7deda9a3b81eba2dcd9f63dc38d617367d114908 100644 (file)
 #ifndef __AMDGPU_MMHUB_H__
 #define __AMDGPU_MMHUB_H__
 
-struct amdgpu_mmhub_ras_funcs {
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-       void (*query_ras_error_status)(struct amdgpu_device *adev);
-       void (*reset_ras_error_count)(struct amdgpu_device *adev);
-       void (*reset_ras_error_status)(struct amdgpu_device *adev);
+struct amdgpu_mmhub_ras {
+       struct amdgpu_ras_block_object ras_block;
 };
 
 struct amdgpu_mmhub_funcs {
@@ -50,10 +44,10 @@ struct amdgpu_mmhub_funcs {
 struct amdgpu_mmhub {
        struct ras_common_if *ras_if;
        const struct amdgpu_mmhub_funcs *funcs;
-       const struct amdgpu_mmhub_ras_funcs *ras_funcs;
+       struct amdgpu_mmhub_ras  *ras;
 };
 
-int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, void *ras_info);
 void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev);
 #endif
 
index 6afb02fef8cfb786c76d74d50b588bcdb6108e9b..6ace2e390e777e8ef953a2c98ba300345db39cb5 100644 (file)
@@ -22,7 +22,7 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 
-int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        int r;
        struct ras_ih_if ih_info = {
index 843052205bd5a719c77c31561bc6e5bb3d8a16cd..4afb76d3cd9710664220b3d9833e72dacbfe06ba 100644 (file)
@@ -47,15 +47,12 @@ struct nbio_hdp_flush_reg {
        u32 ref_and_mask_sdma7;
 };
 
-struct amdgpu_nbio_ras_funcs {
+struct amdgpu_nbio_ras {
+       struct amdgpu_ras_block_object ras_block;
        void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
        void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
        int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
        int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_nbio_funcs {
@@ -104,9 +101,9 @@ struct amdgpu_nbio {
        struct amdgpu_irq_src ras_err_event_athub_irq;
        struct ras_common_if *ras_if;
        const struct amdgpu_nbio_funcs *funcs;
-       const struct amdgpu_nbio_ras_funcs *ras_funcs;
+       struct amdgpu_nbio_ras  *ras;
 };
 
-int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, void *ras_info);
 void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
 #endif
index 5661b82d84d4641f0ce06ba4f1ad97c7aaac1b03..23c9a60693ee4cd66b7803114d6a00abf7dc4f88 100644 (file)
@@ -575,6 +575,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        if (!amdgpu_bo_support_uswc(bo->flags))
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 
+       if (adev->ras_enabled)
+               bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+
        bo->tbo.bdev = &adev->mman.bdev;
        if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
                          AMDGPU_GEM_DOMAIN_GDS))
@@ -1303,7 +1306,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
            !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
                return;
 
-       dma_resv_lock(bo->base.resv, NULL);
+       if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+               return;
 
        r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
        if (!WARN_ON(r)) {
index dee17a0e11870618a1277545a68cfc464655a073..9bc9155cbf0660ea3db9e24a259a98712c42133c 100644 (file)
@@ -137,7 +137,8 @@ static int psp_early_init(void *handle)
                psp->autoload_supported = true;
                break;
        case IP_VERSION(11, 0, 8):
-               if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+               if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2 ||
+                   adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4)) {
                        psp_v11_0_8_set_psp_funcs(psp);
                        psp->autoload_supported = false;
                }
@@ -259,6 +260,32 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
        return ret;
 }
 
+static int psp_init_sriov_microcode(struct psp_context *psp)
+{
+       struct amdgpu_device *adev = psp->adev;
+       int ret = 0;
+
+       switch (adev->ip_versions[MP0_HWIP][0]) {
+       case IP_VERSION(9, 0, 0):
+               ret = psp_init_cap_microcode(psp, "vega10");
+               break;
+       case IP_VERSION(11, 0, 9):
+               ret = psp_init_cap_microcode(psp, "navi12");
+               break;
+       case IP_VERSION(11, 0, 7):
+               ret = psp_init_cap_microcode(psp, "sienna_cichlid");
+               break;
+       case IP_VERSION(13, 0, 2):
+               ret = psp_init_ta_microcode(psp, "aldebaran");
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       return ret;
+}
+
 static int psp_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -273,19 +300,13 @@ static int psp_sw_init(void *handle)
                ret = -ENOMEM;
        }
 
-       if (!amdgpu_sriov_vf(adev)) {
+       if (amdgpu_sriov_vf(adev))
+               ret = psp_init_sriov_microcode(psp);
+       else
                ret = psp_init_microcode(psp);
-               if (ret) {
-                       DRM_ERROR("Failed to load psp firmware!\n");
-                       return ret;
-               }
-       } else if (amdgpu_sriov_vf(adev) &&
-                  adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) {
-               ret = psp_init_ta_microcode(psp, "aldebaran");
-               if (ret) {
-                       DRM_ERROR("Failed to initialize ta microcode!\n");
-                       return ret;
-               }
+       if (ret) {
+               DRM_ERROR("Failed to load psp firmware!\n");
+               return ret;
        }
 
        memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
@@ -353,6 +374,10 @@ static int psp_sw_fini(void *handle)
                release_firmware(psp->ta_fw);
                psp->ta_fw = NULL;
        }
+       if (adev->psp.cap_fw) {
+               release_firmware(psp->cap_fw);
+               psp->cap_fw = NULL;
+       }
 
        if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
            adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
@@ -491,7 +516,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
                DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
                         psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
                         psp->cmd_buf_mem->resp.status);
-               if (!timeout) {
+               /* If we load CAP FW, PSP must return 0 under SRIOV;
+                * a timeout is also treated as failure.
+                */
+               if ((ucode && (ucode->ucode_id == AMDGPU_UCODE_ID_CAP)) || !timeout) {
                        ret = -EINVAL;
                        goto exit;
                }
@@ -914,19 +942,15 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 static int psp_ta_init_shared_buf(struct psp_context *psp,
                                  struct ta_mem_context *mem_ctx)
 {
-       int ret;
-
        /*
        * Allocate 16k memory aligned to 4k from Frame Buffer (local
        * physical) for ta to host memory
        */
-       ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
+       return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                      &mem_ctx->shared_bo,
                                      &mem_ctx->shared_mc_addr,
                                      &mem_ctx->shared_buf);
-
-       return ret;
 }
 
 static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
@@ -1308,6 +1332,11 @@ static void psp_ras_ta_check_status(struct psp_context *psp)
                break;
        case TA_RAS_STATUS__SUCCESS:
                break;
+       case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
+               if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
+                       dev_warn(psp->adev->dev,
+                                       "RAS WARNING: Inject error to critical region is not allowed\n");
+               break;
        default:
                dev_warn(psp->adev->dev,
                                "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
@@ -1520,7 +1549,9 @@ int psp_ras_trigger_error(struct psp_context *psp,
        if (amdgpu_ras_intr_triggered())
                return 0;
 
-       if (ras_cmd->ras_status)
+       if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
+               return -EACCES;
+       else if (ras_cmd->ras_status)
                return -EINVAL;
 
        return 0;
@@ -2051,6 +2082,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
                           enum psp_gfx_fw_type *type)
 {
        switch (ucode->ucode_id) {
+       case AMDGPU_UCODE_ID_CAP:
+               *type = GFX_FW_TYPE_CAP;
+               break;
        case AMDGPU_UCODE_ID_SDMA0:
                *type = GFX_FW_TYPE_SDMA0;
                break;
@@ -3217,6 +3251,58 @@ out:
        return err;
 }
 
+int psp_init_cap_microcode(struct psp_context *psp,
+                         const char *chip_name)
+{
+       struct amdgpu_device *adev = psp->adev;
+       char fw_name[PSP_FW_NAME_LEN];
+       const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
+       struct amdgpu_firmware_info *info = NULL;
+       int err = 0;
+
+       if (!chip_name) {
+               dev_err(adev->dev, "invalid chip name for cap microcode\n");
+               return -EINVAL;
+       }
+
+       if (!amdgpu_sriov_vf(adev)) {
+               dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
+               return -EINVAL;
+       }
+
+       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
+       err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
+       if (err) {
+               dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+               err = 0;
+               goto out;
+       }
+
+       err = amdgpu_ucode_validate(adev->psp.cap_fw);
+       if (err) {
+               dev_err(adev->dev, "fail to initialize cap microcode\n");
+               goto out;
+       }
+
+       info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+       info->ucode_id = AMDGPU_UCODE_ID_CAP;
+       info->fw = adev->psp.cap_fw;
+       cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
+               adev->psp.cap_fw->data;
+       adev->firmware.fw_size += ALIGN(
+                       le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
+       adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
+       adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
+       adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
+
+       return 0;
+
+out:
+       release_firmware(adev->psp.cap_fw);
+       adev->psp.cap_fw = NULL;
+       return err;
+}
+
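In short, the CAP flow added here: a VF optionally requests amdgpu/<chip>_cap.bin, hands it to PSP as AMDGPU_UCODE_ID_CAP/GFX_FW_TYPE_CAP, and surfaces the versions via AMDGPU_INFO_FW_CAP and debugfs. A condensed sketch of the entry point, mirroring psp_init_sriov_microcode above (ret/psp/adev assumed in scope):

	if (amdgpu_sriov_vf(adev))
		ret = psp_init_cap_microcode(psp, "sienna_cichlid");
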
 static int psp_set_clockgating_state(void *handle,
                                     enum amd_clockgating_state state)
 {
index f29afabbff1fbee6509742901466fe57ebc17478..ff7d533eb746ce5f267c9be44dc385d64d32df41 100644 (file)
@@ -306,6 +306,9 @@ struct psp_context
        /* toc firmware */
        const struct firmware           *toc_fw;
 
+       /* cap firmware */
+       const struct firmware           *cap_fw;
+
        /* fence buffer */
        struct amdgpu_bo                *fence_buf_bo;
        uint64_t                        fence_buf_mc_addr;
@@ -327,6 +330,10 @@ struct psp_context
        const struct firmware           *ta_fw;
        uint32_t                        ta_fw_version;
 
+       uint32_t                        cap_fw_version;
+       uint32_t                        cap_feature_version;
+       uint32_t                        cap_ucode_size;
+
        struct ta_context               asd_context;
        struct psp_xgmi_context         xgmi_context;
        struct psp_ras_context          ras_context;
@@ -440,6 +447,8 @@ int psp_init_sos_microcode(struct psp_context *psp,
                           const char *chip_name);
 int psp_init_ta_microcode(struct psp_context *psp,
                          const char *chip_name);
+int psp_init_cap_microcode(struct psp_context *psp,
+                         const char *chip_name);
 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
                                        uint64_t *output_ptr);
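
A hedged userspace sketch of querying the new firmware id through libdrm's existing amdgpu_query_firmware_version() wrapper (the call site is an assumption; dev is an amdgpu_device_handle, and AMDGPU_INFO_FW_CAP is the id wired up in amdgpu_firmware_info() above):

	uint32_t ver = 0, feature = 0;

	if (!amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_CAP,
					   0, 0, &ver, &feature))
		printf("CAP fw 0x%08x, feature %u\n", ver, feature);
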
 
index 8f47c14ecbc7ee833237fc3a87a7213acfed4c63..a9c133a09be5643917901dcad5cbcd5f40c3a149 100644 (file)
@@ -75,6 +75,13 @@ const char *ras_mca_block_string[] = {
        "mca_iohc",
 };
 
+struct amdgpu_ras_block_list {
+       /* ras block link */
+       struct list_head node;
+
+       struct amdgpu_ras_block_object *ras_obj;
+};
+
 const char *get_ras_block_str(struct ras_common_if *ras_block)
 {
        if (!ras_block)
@@ -89,6 +96,9 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
        return ras_block_string[ras_block->block];
 }
 
+#define ras_block_str(_BLOCK_) \
+       (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
+
 #define ras_err_str(i) (ras_error_string[ffs(i)])
 
 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -155,14 +165,9 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
        }
 
        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
-
-       err_rec.address = address;
-       err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
-       err_rec.ts = (uint64_t)ktime_get_real_seconds();
-       err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-
        err_data.err_addr = &err_rec;
-       err_data.err_addr_cnt = 1;
+       amdgpu_umc_fill_error_record(&err_data, address,
+                       (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
 
        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -452,7 +457,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
        }
 
        if (ret)
-               return -EINVAL;
+               return ret;
 
        return size;
 }
@@ -866,30 +871,47 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
 }
 /* feature ctl end */
 
+static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
+               enum amdgpu_ras_block block)
+{
+       if (!block_obj)
+               return -EINVAL;
+
+       if (block_obj->block == block)
+               return 0;
 
-static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
-                                             struct ras_common_if *ras_block,
-                                             struct ras_err_data  *err_data)
+       return -EINVAL;
+}
+
+static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
+                                       enum amdgpu_ras_block block, uint32_t sub_block_index)
 {
-       switch (ras_block->sub_block_index) {
-       case AMDGPU_RAS_MCA_BLOCK__MP0:
-               if (adev->mca.mp0.ras_funcs &&
-                   adev->mca.mp0.ras_funcs->query_ras_error_count)
-                       adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_MCA_BLOCK__MP1:
-               if (adev->mca.mp1.ras_funcs &&
-                   adev->mca.mp1.ras_funcs->query_ras_error_count)
-                       adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_MCA_BLOCK__MPIO:
-               if (adev->mca.mpio.ras_funcs &&
-                   adev->mca.mpio.ras_funcs->query_ras_error_count)
-                       adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       default:
-               break;
+       struct amdgpu_ras_block_list *node, *tmp;
+       struct amdgpu_ras_block_object *obj;
+
+       if (block >= AMDGPU_RAS_BLOCK__LAST)
+               return NULL;
+
+       if (!amdgpu_ras_is_supported(adev, block))
+               return NULL;
+
+       list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
+               if (!node->ras_obj) {
+                       dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
+                       continue;
+               }
+
+               obj = node->ras_obj;
+               if (obj->ras_block_match) {
+                       if (obj->ras_block_match(obj, block, sub_block_index) == 0)
+                               return obj;
+               } else {
+                       if (amdgpu_ras_block_match_default(obj, block) == 0)
+                               return obj;
+               }
        }
+
+       return NULL;
 }
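
A sketch of the dispatch pattern callers now follow (mirroring the query/reset paths below): resolve the registered object, then call through hw_ops only when the hook exists.

	block_obj = amdgpu_ras_get_ras_block(adev, AMDGPU_RAS_BLOCK__GFX, 0);
	if (block_obj && block_obj->hw_ops &&
	    block_obj->hw_ops->query_ras_error_count)
		block_obj->hw_ops->query_ras_error_count(adev, &err_data);
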
 
 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
@@ -901,26 +923,26 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
         * choosing the right query method according to
         * whether the smu supports querying error information
         */
-       ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc));
+       ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
-               if (adev->umc.ras_funcs &&
-                       adev->umc.ras_funcs->query_ras_error_count)
-                       adev->umc.ras_funcs->query_ras_error_count(adev, err_data);
+               if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
 
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
-               if (adev->umc.ras_funcs &&
-                   adev->umc.ras_funcs->query_ras_error_address)
-                       adev->umc.ras_funcs->query_ras_error_address(adev, err_data);
+               if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+                   adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
-               if (adev->umc.ras_funcs &&
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_count)
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data);
+               if (adev->umc.ras &&
+                       adev->umc.ras->ecc_info_query_ras_error_count)
+                       adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
 
-               if (adev->umc.ras_funcs &&
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_address)
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data);
+               if (adev->umc.ras &&
+                       adev->umc.ras->ecc_info_query_ras_error_address)
+                       adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
 }
 
@@ -928,62 +950,32 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d
 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                                  struct ras_query_if *info)
 {
+       struct amdgpu_ras_block_object *block_obj = NULL;
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data = {0, 0, 0, NULL};
-       int i;
 
        if (!obj)
                return -EINVAL;
 
-       switch (info->head.block) {
-       case AMDGPU_RAS_BLOCK__UMC:
+       if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
                amdgpu_ras_get_ecc_info(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__SDMA:
-               if (adev->sdma.funcs->query_ras_error_count) {
-                       for (i = 0; i < adev->sdma.num_instances; i++)
-                               adev->sdma.funcs->query_ras_error_count(adev, i,
-                                                                       &err_data);
+       } else {
+               block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
+               if (!block_obj || !block_obj->hw_ops) {
+                       dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
+                                    get_ras_block_str(&info->head));
+                       return -EINVAL;
                }
-               break;
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_count)
-                       adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
-
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_status)
-                       adev->gfx.ras_funcs->query_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->query_ras_error_count)
-                       adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
-
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->query_ras_error_status)
-                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__PCIE_BIF:
-               if (adev->nbio.ras_funcs &&
-                   adev->nbio.ras_funcs->query_ras_error_count)
-                       adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-               if (adev->gmc.xgmi.ras_funcs &&
-                   adev->gmc.xgmi.ras_funcs->query_ras_error_count)
-                       adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__HDP:
-               if (adev->hdp.ras_funcs &&
-                   adev->hdp.ras_funcs->query_ras_error_count)
-                       adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
-               break;
-       case AMDGPU_RAS_BLOCK__MCA:
-               amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
-               break;
-       default:
-               break;
+
+               if (block_obj->hw_ops->query_ras_error_count)
+                       block_obj->hw_ops->query_ras_error_count(adev, &err_data);
+
+               if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
+                   (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
+                   (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
+                       if (block_obj->hw_ops->query_ras_error_status)
+                               block_obj->hw_ops->query_ras_error_status(adev);
+               }
        }
 
        obj->err_data.ue_count += err_data.ue_count;
@@ -1040,68 +1032,27 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
 {
+       struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
+
        if (!amdgpu_ras_is_supported(adev, block))
                return -EINVAL;
 
-       switch (block) {
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->reset_ras_error_count)
-                       adev->gfx.ras_funcs->reset_ras_error_count(adev);
-
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->reset_ras_error_status)
-                       adev->gfx.ras_funcs->reset_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->reset_ras_error_count)
-                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
-
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->reset_ras_error_status)
-                       adev->mmhub.ras_funcs->reset_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__SDMA:
-               if (adev->sdma.funcs->reset_ras_error_count)
-                       adev->sdma.funcs->reset_ras_error_count(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__HDP:
-               if (adev->hdp.ras_funcs &&
-                   adev->hdp.ras_funcs->reset_ras_error_count)
-                       adev->hdp.ras_funcs->reset_ras_error_count(adev);
-               break;
-       default:
-               break;
+       if (!block_obj || !block_obj->hw_ops) {
+               dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
+                            ras_block_str(block));
+               return -EINVAL;
        }
 
-       return 0;
-}
-
-/* Trigger XGMI/WAFL error */
-static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
-                                struct ta_ras_trigger_error_input *block_info)
-{
-       int ret;
+       if (block_obj->hw_ops->reset_ras_error_count)
+               block_obj->hw_ops->reset_ras_error_count(adev);
 
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
-               dev_warn(adev->dev, "Failed to disallow df cstate");
-
-       if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
-               dev_warn(adev->dev, "Failed to disallow XGMI power down");
-
-       ret = psp_ras_trigger_error(&adev->psp, block_info);
-
-       if (amdgpu_ras_intr_triggered())
-               return ret;
-
-       if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
-               dev_warn(adev->dev, "Failed to allow XGMI power down");
-
-       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
-               dev_warn(adev->dev, "Failed to allow df cstate");
+       if ((block == AMDGPU_RAS_BLOCK__GFX) ||
+           (block == AMDGPU_RAS_BLOCK__MMHUB)) {
+               if (block_obj->hw_ops->reset_ras_error_status)
+                       block_obj->hw_ops->reset_ras_error_status(adev);
+       }
 
-       return ret;
+       return 0;
 }
 
 /* wrapper of psp_ras_trigger_error */
@@ -1116,11 +1067,20 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                .address = info->address,
                .value = info->value,
        };
-       int ret = 0;
+       int ret = -EINVAL;
+       struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
+                                                       info->head.block,
+                                                       info->head.sub_block_index);
 
        if (!obj)
                return -EINVAL;
 
+       if (!block_obj || !block_obj->hw_ops) {
+               dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
+                            get_ras_block_str(&info->head));
+               return -EINVAL;
+       }
+
        /* Calculate XGMI relative offset */
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                block_info.address =
@@ -1128,28 +1088,15 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                                                          block_info.address);
        }
 
-       switch (info->head.block) {
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->ras_error_inject)
-                       ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
-               else
-                       ret = -EINVAL;
-               break;
-       case AMDGPU_RAS_BLOCK__UMC:
-       case AMDGPU_RAS_BLOCK__SDMA:
-       case AMDGPU_RAS_BLOCK__MMHUB:
-       case AMDGPU_RAS_BLOCK__PCIE_BIF:
-       case AMDGPU_RAS_BLOCK__MCA:
-               ret = psp_ras_trigger_error(&adev->psp, &block_info);
-               break;
-       case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-               ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
-               break;
-       default:
-               dev_info(adev->dev, "%s error injection is not supported yet\n",
-                        get_ras_block_str(&info->head));
-               ret = -EINVAL;
+       if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
+               if (block_obj->hw_ops->ras_error_inject)
+                       ret = block_obj->hw_ops->ras_error_inject(adev, info);
+       } else {
+               /* Blocks that define their own ras_error_inject (e.g. xgmi)
+                * use it here; otherwise fall back to the default psp-based
+                * injection. */
+               if (block_obj->hw_ops->ras_error_inject)
+                       ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
+               else
+                       ret = psp_ras_trigger_error(&adev->psp, &block_info);
        }
 
        if (ret)
@@ -1766,24 +1713,28 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
                                          struct ras_query_if *info)
 {
+       struct amdgpu_ras_block_object *block_obj;
        /*
         * Only two blocks need to query read/write
         * RspStatus at the current state
         */
-       switch (info->head.block) {
-       case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.ras_funcs &&
-                   adev->gfx.ras_funcs->query_ras_error_status)
-                       adev->gfx.ras_funcs->query_ras_error_status(adev);
-               break;
-       case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->query_ras_error_status)
-                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
-               break;
-       default:
-               break;
+       if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
+               (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
+               return;
+
+       block_obj = amdgpu_ras_get_ras_block(adev,
+                                       info->head.block,
+                                       info->head.sub_block_index);
+
+       if (!block_obj || !block_obj->hw_ops) {
+               dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
+                            get_ras_block_str(&info->head));
+               return;
        }
+
+       if (block_obj->hw_ops->query_ras_error_status)
+               block_obj->hw_ops->query_ras_error_status(adev);
 }
 
 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
@@ -2141,8 +2092,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
                if (ret)
                        goto free;
 
-               if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
-                       adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+               amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
        }
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -2348,24 +2298,26 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        case CHIP_VEGA20:
        case CHIP_ARCTURUS:
        case CHIP_ALDEBARAN:
-               if (!adev->gmc.xgmi.connected_to_cpu)
-                       adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+               if (!adev->gmc.xgmi.connected_to_cpu) {
+                       adev->nbio.ras = &nbio_v7_4_ras;
+                       amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
+               }
                break;
        default:
                /* nbio ras is not available */
                break;
        }
 
-       if (adev->nbio.ras_funcs &&
-           adev->nbio.ras_funcs->init_ras_controller_interrupt) {
-               r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
+       if (adev->nbio.ras &&
+           adev->nbio.ras->init_ras_controller_interrupt) {
+               r = adev->nbio.ras->init_ras_controller_interrupt(adev);
                if (r)
                        goto release_con;
        }
 
-       if (adev->nbio.ras_funcs &&
-           adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
-               r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
+       if (adev->nbio.ras &&
+           adev->nbio.ras->init_ras_err_event_athub_interrupt) {
+               r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
                if (r)
                        goto release_con;
        }
@@ -2377,12 +2329,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        }
        else if (adev->df.funcs &&
            adev->df.funcs->query_ras_poison_mode &&
-           adev->umc.ras_funcs &&
-           adev->umc.ras_funcs->query_ras_poison_mode) {
+           adev->umc.ras &&
+           adev->umc.ras->query_ras_poison_mode) {
                df_poison =
                        adev->df.funcs->query_ras_poison_mode(adev);
                umc_poison =
-                       adev->umc.ras_funcs->query_ras_poison_mode(adev);
+                       adev->umc.ras->query_ras_poison_mode(adev);
                /* Only poison is set in both DF and UMC, we can support it */
                if (df_poison && umc_poison)
                        con->poison_supported = true;
@@ -2585,6 +2537,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
 
 int amdgpu_ras_fini(struct amdgpu_device *adev)
 {
+       struct amdgpu_ras_block_list *ras_node, *tmp;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
        if (!adev->ras_enabled || !con)
@@ -2603,6 +2556,12 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
        amdgpu_ras_set_context(adev, NULL);
        kfree(con);
 
+       /* Clear ras blocks from ras_list and free ras block list node */
+       list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
+               list_del(&ras_node->node);
+               kfree(ras_node);
+       }
+
        return 0;
 }
 
@@ -2717,8 +2676,6 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
        dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
                             umc_inst, ch_inst);
 
-       memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
-
        /*
         * Translate UMC channel address to Physical address
         */
@@ -2730,16 +2687,10 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
                        ADDR_OF_256B_BLOCK(channel_index) |
                        OFFSET_IN_256B_BLOCK(m->addr);
 
-       err_rec.address = m->addr;
-       err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-       err_rec.ts = (uint64_t)ktime_get_real_seconds();
-       err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-       err_rec.cu = 0;
-       err_rec.mem_channel = channel_index;
-       err_rec.mcumc_id = umc_inst;
-
+       memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
-       err_data.err_addr_cnt = 1;
+       amdgpu_umc_fill_error_record(&err_data, m->addr,
+                       retired_page, channel_index, umc_inst);
 
        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
@@ -2777,3 +2728,63 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
        }
 }
 #endif
+
+struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
+{
+       if (!adev)
+               return NULL;
+
+       return adev->psp.ras_context.ras;
+}
+
+int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
+{
+       if (!adev)
+               return -EINVAL;
+
+       adev->psp.ras_context.ras = ras_con;
+       return 0;
+}
+
+/* check if ras is supported on block, say, sdma, gfx */
+int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+               unsigned int block)
+{
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       if (block >= AMDGPU_RAS_BLOCK_COUNT)
+               return 0;
+       return ras && (adev->ras_enabled & (1 << block));
+}
+
+int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
+{
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+               schedule_work(&ras->recovery_work);
+       return 0;
+}
+
+
+/* Register an IP's ras block into the amdgpu ras framework */
+int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+               struct amdgpu_ras_block_object *ras_block_obj)
+{
+       struct amdgpu_ras_block_list *ras_node;
+       if (!adev || !ras_block_obj)
+               return -EINVAL;
+
+       if (!amdgpu_ras_asic_supported(adev))
+               return 0;
+
+       ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
+       if (!ras_node)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&ras_node->node);
+       ras_node->ras_obj = ras_block_obj;
+       list_add_tail(&ras_node->node, &adev->ras_list);
+
+       return 0;
+}
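
The registration path above replaces the per-IP *_ras_funcs tables: each IP wraps its callbacks in an amdgpu_ras_block_object (defined in amdgpu_ras.h below) and hands it to amdgpu_ras_register_ras_block() once at init time. A minimal sketch of the idea with a deliberately hypothetical "foo" block -- none of these names are real driver code:

	static void foo_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_error_status);

	static const struct amdgpu_ras_block_hw_ops foo_ras_hw_ops = {
		.query_ras_error_count = foo_query_ras_error_count,
	};

	static struct amdgpu_ras_block_object foo_ras_block = {
		.name = "foo",
		.block = AMDGPU_RAS_BLOCK__SDMA,	/* example block id */
		.sub_block_index = 0,
		.hw_ops = &foo_ras_hw_ops,
	};

	/* typically called from the IP's sw_init; the kzalloc'd list node
	 * is released again by the teardown loop added to amdgpu_ras_fini() */
	r = amdgpu_ras_register_ras_block(adev, &foo_ras_block);
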
index 1c708122d4922979fcb511b852857c70cea3cf6c..a55743b12d576edf99bfdb56e09e972344e5cad3 100644 (file)
 
 #include <linux/debugfs.h>
 #include <linux/list.h>
-#include "amdgpu.h"
-#include "amdgpu_psp.h"
 #include "ta_ras_if.h"
 #include "amdgpu_ras_eeprom.h"
 
+struct amdgpu_iv_entry;
+
 #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS          (0x1 << 0)
 
 enum amdgpu_ras_block {
@@ -484,6 +484,31 @@ struct ras_debug_if {
        };
        int op;
 };
+
+struct amdgpu_ras_block_object {
+       /* block name */
+       char name[32];
+
+       enum amdgpu_ras_block block;
+
+       uint32_t sub_block_index;
+
+       int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj,
+                               enum amdgpu_ras_block block, uint32_t sub_block_index);
+       int (*ras_late_init)(struct amdgpu_device *adev, void *ras_info);
+       void (*ras_fini)(struct amdgpu_device *adev);
+       const struct amdgpu_ras_block_hw_ops *hw_ops;
+};
+
+struct amdgpu_ras_block_hw_ops {
+       int  (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
+       void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
+       void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status);
+       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+       void (*reset_ras_error_status)(struct amdgpu_device *adev);
+};
+
 /* work flow
  * vbios
  * 1: ras feature enable (enabled by default)
@@ -498,19 +523,6 @@ struct ras_debug_if {
  * 8: feature disable
  */
 
-#define amdgpu_ras_get_context(adev)           ((adev)->psp.ras_context.ras)
-#define amdgpu_ras_set_context(adev, ras_con)  ((adev)->psp.ras_context.ras = (ras_con))
-
-/* check if ras is supported on block, say, sdma, gfx */
-static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
-               unsigned int block)
-{
-       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
-       if (block >= AMDGPU_RAS_BLOCK_COUNT)
-               return 0;
-       return ras && (adev->ras_enabled & (1 << block));
-}
 
 int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
 
@@ -527,15 +539,6 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
 
 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev);
 
-static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
-{
-       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
-       if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
-               schedule_work(&ras->recovery_work);
-       return 0;
-}
-
 static inline enum ta_ras_block
 amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
        switch (block) {
@@ -667,4 +670,14 @@ const char *get_ras_block_str(struct ras_common_if *ras_block);
 
 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev);
 
+int amdgpu_ras_is_supported(struct amdgpu_device *adev, unsigned int block);
+
+int amdgpu_ras_reset_gpu(struct amdgpu_device *adev);
+
+struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev);
+
+int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
+
+int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+                               struct amdgpu_ras_block_object *ras_block_obj);
 #endif
index 05117eda105b55a0a66e2283dbe5470760e20c35..2b844a5aafdbf1787eccbfe5ae8b6b8ab849c178 100644 (file)
@@ -194,7 +194,7 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
 
        /* i2c may be unstable in gpu reset */
        down_read(&adev->reset_sem);
-       res = amdgpu_eeprom_write(&adev->pm.smu_i2c,
+       res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
                                  control->i2c_address +
                                  control->ras_header_offset,
                                  buf, RAS_TABLE_HEADER_SIZE);
@@ -263,6 +263,7 @@ static int amdgpu_ras_eeprom_correct_header_tag(
  */
 int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
 {
+       struct amdgpu_device *adev = to_amdgpu_device(control);
        struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
        u8 csum;
        int res;
@@ -282,6 +283,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
        control->ras_num_recs = 0;
        control->ras_fri = 0;
 
+       amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_recs);
+
        amdgpu_ras_debugfs_set_ret_size(control);
 
        mutex_unlock(&control->ras_tbl_mutex);
@@ -389,7 +392,7 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
        /* i2c may be unstable in gpu reset */
        down_read(&adev->reset_sem);
        buf_size = num * RAS_TABLE_RECORD_SIZE;
-       res = amdgpu_eeprom_write(&adev->pm.smu_i2c,
+       res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
                                  control->i2c_address +
                                  RAS_INDEX_TO_OFFSET(control, fri),
                                  buf, buf_size);
@@ -548,7 +551,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
        }
 
        down_read(&adev->reset_sem);
-       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+       res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
                                 control->i2c_address +
                                 control->ras_record_offset,
                                 buf, buf_size);
@@ -644,7 +647,7 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
        /* i2c may be unstable in gpu reset */
        down_read(&adev->reset_sem);
        buf_size = num * RAS_TABLE_RECORD_SIZE;
-       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+       res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
                                 control->i2c_address +
                                 RAS_INDEX_TO_OFFSET(control, fri),
                                 buf, buf_size);
@@ -1009,7 +1012,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control
                return -ENOMEM;
        }
 
-       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+       res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
                                 control->i2c_address +
                                 control->ras_header_offset,
                                 buf, buf_size);
@@ -1045,7 +1048,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
                return 0;
 
        /* Verify i2c adapter is initialized */
-       if (!adev->pm.smu_i2c.algo)
+       if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
                return -ENOENT;
 
        if (!__get_eeprom_i2c_addr(adev, control))
@@ -1057,7 +1060,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
        mutex_init(&control->ras_tbl_mutex);
 
        /* Read the table header from EEPROM address */
-       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+       res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
                                 control->i2c_address + control->ras_header_offset,
                                 buf, RAS_TABLE_HEADER_SIZE);
        if (res < RAS_TABLE_HEADER_SIZE) {
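
Every EEPROM access in this file now goes through the single adev->pm.ras_eeprom_i2c_bus pointer instead of hard-coding &adev->pm.smu_i2c, so ASICs that enable both SMU i2c buses can point the RAS code at whichever bus the EEPROM actually sits on. A hedged sketch of the indirection -- the helper below is illustrative, not the actual per-ASIC selection code:

	/* Illustrative only: the real assignment happens in per-ASIC SMU
	 * i2c setup; this just shows what the new pointer decouples. */
	static void example_pick_ras_eeprom_bus(struct amdgpu_device *adev,
						struct i2c_adapter *bus)
	{
		/* whichever SMU i2c bus the RAS EEPROM hangs off on this board */
		adev->pm.ras_eeprom_i2c_bus = bus;
	}
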
index 00afd0dcae86cd7a24db1ec50c6087a5273afb12..3f671a62b0098e38efc37817ca2755be0007d417 100644 (file)
@@ -127,11 +127,19 @@ struct amdgpu_rlc_funcs {
        void (*reset)(struct amdgpu_device *adev);
        void (*start)(struct amdgpu_device *adev);
        void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
-       void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
-       u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
        bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
 
+struct amdgpu_rlcg_reg_access_ctrl {
+       uint32_t scratch_reg0;
+       uint32_t scratch_reg1;
+       uint32_t scratch_reg2;
+       uint32_t scratch_reg3;
+       uint32_t grbm_cntl;
+       uint32_t grbm_idx;
+       uint32_t spare_int;
+};
+
 struct amdgpu_rlc {
        /* for power gating */
        struct amdgpu_bo        *save_restore_obj;
@@ -191,6 +199,10 @@ struct amdgpu_rlc {
        struct amdgpu_bo        *rlc_toc_bo;
        uint64_t                rlc_toc_gpu_addr;
        void                    *rlc_toc_buf;
+
+       bool rlcg_reg_access_supported;
+       /* registers for rlcg indirect reg access */
+       struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl;
 };
 
 void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
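
With sriov_wreg/sriov_rreg gone from amdgpu_rlc_funcs, each GFX version only has to record the scratch and control registers the shared RLCG path needs and flip rlcg_reg_access_supported. A sketch of what that setup might look like -- the offsets are placeholders; real code derives them with SOC15_REG_OFFSET() for the ASIC at hand:

	static void example_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
	{
		struct amdgpu_rlcg_reg_access_ctrl *ctrl =
			&adev->gfx.rlc.reg_access_ctrl;

		ctrl->scratch_reg0 = 0x2040;	/* hypothetical mmSCRATCH_REG0 */
		ctrl->scratch_reg1 = 0x2041;
		ctrl->scratch_reg2 = 0x2042;
		ctrl->scratch_reg3 = 0x2043;
		ctrl->grbm_cntl = 0x2000;	/* hypothetical mmGRBM_GFX_CNTL */
		ctrl->grbm_idx = 0x2001;	/* hypothetical mmGRBM_GFX_INDEX */
		ctrl->spare_int = 0x2050;	/* hypothetical mmRLC_SPARE_INT */
		adev->gfx.rlc.rlcg_reg_access_supported = true;
	}
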
index f8fb755e3aa64e42f1bff0830b61ed279abd58cf..eaee12ab651811dd04bba35723c313e526e2f560 100644 (file)
@@ -23,6 +23,7 @@
 
 #ifndef __AMDGPU_SDMA_H__
 #define __AMDGPU_SDMA_H__
+#include "amdgpu_ras.h"
 
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES              8
@@ -50,13 +51,8 @@ struct amdgpu_sdma_instance {
        bool                    burst_nop;
 };
 
-struct amdgpu_sdma_ras_funcs {
-       int (*ras_late_init)(struct amdgpu_device *adev,
-                       void *ras_ih_info);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       int (*query_ras_error_count)(struct amdgpu_device *adev,
-                       uint32_t instance, void *ras_error_status);
-       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+struct amdgpu_sdma_ras {
+       struct amdgpu_ras_block_object ras_block;
 };
 
 struct amdgpu_sdma {
@@ -73,7 +69,7 @@ struct amdgpu_sdma {
        uint32_t                    srbm_soft_reset;
        bool                    has_page_queue;
        struct ras_common_if    *ras_if;
-       const struct amdgpu_sdma_ras_funcs      *funcs;
+       struct amdgpu_sdma_ras  *ras;
 };
 
 /*
index d855cb53c7e09569fdec6b981e36a6eee86b29ea..a48b34d4ce317a4b134e37245d8717ee0f6af9df 100644 (file)
@@ -358,11 +358,10 @@ TRACE_EVENT(amdgpu_vm_update_ptes,
                        }
        ),
        TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
-                 " flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid,
+                 " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
                  __entry->vm_ctx, __entry->start, __entry->end,
                  __entry->flags, __entry->incr,  __print_array(
-                 __get_dynamic_array(dst), min(__entry->nptes, 32u), 8),
-                 __entry->nptes > 32 ? "..." : "")
+                 __get_dynamic_array(dst), __entry->nptes, 8))
 );
 
 TRACE_EVENT(amdgpu_vm_set_ptes,
index d178fbec70489523711d0c9f940ed7483d747df6..414a22dddc78d03d66729cf08ed2cb4ae5baad5c 100644 (file)
@@ -50,6 +50,7 @@
 #include <drm/ttm/ttm_range_manager.h>
 
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
 
 #include "amdgpu.h"
 #include "amdgpu_object.h"
@@ -170,10 +171,10 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
  * @bo: buffer object to map
  * @mem: memory object to map
  * @mm_cur: range to map
- * @num_pages: number of pages to map
  * @window: which GART window to use
  * @ring: DMA ring to use for the copy
  * @tmz: if we should setup a TMZ enabled mapping
+ * @size: in: number of bytes to map; out: number of bytes actually mapped
  * @addr: resulting address inside the MC address space
  *
  * Setup one of the GART windows to access a specific piece of memory or return
@@ -182,15 +183,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                                 struct ttm_resource *mem,
                                 struct amdgpu_res_cursor *mm_cur,
-                                unsigned num_pages, unsigned window,
-                                struct amdgpu_ring *ring, bool tmz,
-                                uint64_t *addr)
+                                unsigned window, struct amdgpu_ring *ring,
+                                bool tmz, uint64_t *size, uint64_t *addr)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_job *job;
-       unsigned num_dw, num_bytes;
-       struct dma_fence *fence;
+       unsigned offset, num_pages, num_dw, num_bytes;
        uint64_t src_addr, dst_addr;
+       struct dma_fence *fence;
+       struct amdgpu_job *job;
        void *cpu_addr;
        uint64_t flags;
        unsigned int i;
@@ -198,7 +198,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 
        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-       BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
+
+       if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
+               return -EINVAL;
 
        /* Map only what can't be accessed directly */
        if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
@@ -207,10 +209,22 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                return 0;
        }
 
+
+       /*
+        * If start begins at an offset inside the page, then adjust the size
+        * and addr accordingly
+        */
+       offset = mm_cur->start & ~PAGE_MASK;
+
+       num_pages = PFN_UP(*size + offset);
+       num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
+
+       *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
+
        *addr = adev->gmc.gart_start;
        *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                AMDGPU_GPU_PAGE_SIZE;
-       *addr += mm_cur->start & ~PAGE_MASK;
+       *addr += offset;
 
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
@@ -241,10 +255,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                dma_addr_t *dma_addr;
 
                dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
-               r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
-                                   cpu_addr);
-               if (r)
-                       goto error_free;
+               amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
        } else {
                dma_addr_t dma_address;
 
@@ -252,11 +263,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                dma_address += adev->vm_manager.vram_base_offset;
 
                for (i = 0; i < num_pages; ++i) {
-                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
-                                           &dma_address, flags, cpu_addr);
-                       if (r)
-                               goto error_free;
-
+                       amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
+                                       flags, cpu_addr);
                        dma_address += PAGE_SIZE;
                }
        }
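
amdgpu_ttm_map_buffer() now takes the byte count as an in/out parameter and clamps it against the GART window itself, instead of making every caller pre-compute a page count. A worked example of the arithmetic, with illustrative numbers:

	/* Worked example of the clamping the function now does internally;
	 * AMDGPU_GTT_MAX_TRANSFER_SIZE is a page count. */
	uint64_t size = 8ULL << 20;			/* caller asks for 8 MiB */
	unsigned int offset = 0x800;			/* start is mid-page */
	unsigned int num_pages = PFN_UP(size + offset);	/* 2049 pages touched */

	num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
	size = min(size, (uint64_t)num_pages * PAGE_SIZE - offset);
	/* the caller sees the reduced size and simply loops until its
	 * resource cursor is drained */
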
@@ -297,9 +305,6 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                               struct dma_resv *resv,
                               struct dma_fence **f)
 {
-       const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
-                                       AMDGPU_GPU_PAGE_SIZE);
-
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct amdgpu_res_cursor src_mm, dst_mm;
        struct dma_fence *fence = NULL;
@@ -315,29 +320,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 
        mutex_lock(&adev->mman.gtt_window_lock);
        while (src_mm.remaining) {
-               uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
-               uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
+               uint64_t from, to, cur_size;
                struct dma_fence *next;
-               uint32_t cur_size;
-               uint64_t from, to;
 
-               /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
-                * begins at an offset, then adjust the size accordingly
-                */
-               cur_size = max(src_page_offset, dst_page_offset);
-               cur_size = min(min3(src_mm.size, dst_mm.size, size),
-                              (uint64_t)(GTT_MAX_BYTES - cur_size));
+               /* Never copy more than 256MiB at once to avoid a timeout */
+               cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
 
                /* Map src to window 0 and dst to window 1. */
                r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
-                                         PFN_UP(cur_size + src_page_offset),
-                                         0, ring, tmz, &from);
+                                         0, ring, tmz, &cur_size, &from);
                if (r)
                        goto error;
 
                r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
-                                         PFN_UP(cur_size + dst_page_offset),
-                                         1, ring, tmz, &to);
+                                         1, ring, tmz, &cur_size, &to);
                if (r)
                        goto error;
 
@@ -396,8 +392,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
            (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
                struct dma_fence *wipe_fence = NULL;
 
-               r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
-                                      NULL, &wipe_fence);
+               r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence);
                if (r) {
                        goto error;
                } else if (wipe_fence) {
@@ -821,14 +816,13 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 #endif
 }
 
-static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
-                               struct ttm_buffer_object *tbo,
-                               uint64_t flags)
+static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+                                struct ttm_buffer_object *tbo,
+                                uint64_t flags)
 {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
        struct ttm_tt *ttm = tbo->ttm;
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
 
        if (amdgpu_bo_encrypted(abo))
                flags |= AMDGPU_PTE_TMZ;
@@ -836,10 +830,8 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
 
-               r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
-                               gtt->ttm.dma_address, flags);
-               if (r)
-                       goto gart_bind_fail;
+               amdgpu_gart_bind(adev, gtt->offset, page_idx,
+                                gtt->ttm.dma_address, flags);
 
                /* The memory type of the first page defaults to UC. Now
                 * modify the memory type to NC from the second page of
@@ -848,21 +840,13 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
                flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
                flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 
-               r = amdgpu_gart_bind(adev,
-                               gtt->offset + (page_idx << PAGE_SHIFT),
-                               ttm->num_pages - page_idx,
-                               &(gtt->ttm.dma_address[page_idx]), flags);
+               amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
+                                ttm->num_pages - page_idx,
+                                &(gtt->ttm.dma_address[page_idx]), flags);
        } else {
-               r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-                                    gtt->ttm.dma_address, flags);
+               amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                                gtt->ttm.dma_address, flags);
        }
-
-gart_bind_fail:
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
-
-       return r;
 }
 
 /*
@@ -878,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        uint64_t flags;
-       int r = 0;
+       int r;
 
        if (!bo_mem)
                return -EINVAL;
@@ -925,14 +909,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 
        /* bind pages into GART page tables */
        gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-       r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-               gtt->ttm.dma_address, flags);
-
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
+       amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                        gtt->ttm.dma_address, flags);
        gtt->bound = true;
-       return r;
+       return 0;
 }
 
 /*
@@ -982,12 +962,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
        /* Bind pages */
        gtt->offset = (u64)tmp->start << PAGE_SHIFT;
-       r = amdgpu_ttm_gart_bind(adev, bo, flags);
-       if (unlikely(r)) {
-               ttm_resource_free(bo, &tmp);
-               return r;
-       }
-
+       amdgpu_ttm_gart_bind(adev, bo, flags);
        amdgpu_gart_invalidate_tlb(adev);
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, tmp);
@@ -1001,19 +976,16 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
  * rebind GTT pages during a GPU reset.
  */
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        uint64_t flags;
-       int r;
 
        if (!tbo->ttm)
-               return 0;
+               return;
 
        flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
-       r = amdgpu_ttm_gart_bind(adev, tbo, flags);
-
-       return r;
+       amdgpu_ttm_gart_bind(adev, tbo, flags);
 }
 
 /*
@@ -1027,7 +999,6 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
 
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr) {
@@ -1047,10 +1018,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                return;
 
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-       r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
-       if (r)
-               DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-                         gtt->ttm.num_pages, gtt->offset);
+       amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        gtt->bound = false;
 }
 
@@ -1168,6 +1136,26 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
        return ttm_pool_free(&adev->mman.bdev.pool, ttm);
 }
 
+/**
+ * amdgpu_ttm_tt_get_userptr - Return the userptr backing a GTT ttm_tt, if
+ * there is one
+ *
+ * @tbo: The ttm_buffer_object that contains the userptr
+ * @user_addr: The returned userptr address
+ */
+int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+                             uint64_t *user_addr)
+{
+       struct amdgpu_ttm_tt *gtt;
+
+       if (!tbo->ttm)
+               return -EINVAL;
+
+       gtt = (void *)tbo->ttm;
+       *user_addr = gtt->userptr;
+       return 0;
+}
+
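A hedged usage sketch for the new accessor: a checkpoint path (e.g. the amdkfd CRIU work in this merge) needs the original user VA to re-create a userptr BO on restore. The caller below is illustrative:

	static int example_dump_userptr(struct amdgpu_bo *bo, uint64_t *addr)
	{
		/* -EINVAL means the BO has no ttm_tt, i.e. not a userptr BO */
		return amdgpu_ttm_tt_get_userptr(&bo->tbo, addr);
	}
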
 /**
  * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
  * task
@@ -1433,6 +1421,63 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
        }
 }
 
+static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
+                                       unsigned long offset, void *buf, int len, int write)
+{
+       struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+       struct amdgpu_res_cursor src_mm;
+       struct amdgpu_job *job;
+       struct dma_fence *fence;
+       uint64_t src_addr, dst_addr;
+       unsigned int num_dw;
+       int r, idx;
+
+       if (len != PAGE_SIZE)
+               return -EINVAL;
+
+       if (!adev->mman.sdma_access_ptr)
+               return -EACCES;
+
+       if (!drm_dev_enter(adev_to_drm(adev), &idx))
+               return -ENODEV;
+
+       if (write)
+               memcpy(adev->mman.sdma_access_ptr, buf, len);
+
+       num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+       if (r)
+               goto out;
+
+       amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
+       src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+       dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
+       if (write)
+               swap(src_addr, dst_addr);
+
+       amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+
+       amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+
+       r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       if (r) {
+               amdgpu_job_free(job);
+               goto out;
+       }
+
+       if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+               r = -ETIMEDOUT;
+       dma_fence_put(fence);
+
+       if (!(r || write))
+               memcpy(buf, adev->mman.sdma_access_ptr, len);
+out:
+       drm_dev_exit(idx);
+       return r;
+}
+
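The debug path above bounces every access through the one-page GTT buffer allocated in amdgpu_ttm_init() further down. In rough data-flow terms, and only for whole PAGE_SIZE accesses (anything else falls back to the MM_INDEX/MM_DATA slow path):

	read:	SDMA copies the VRAM page into sdma_access_bo,
		then the CPU memcpy()s it out to buf
	write:	the CPU memcpy()s buf into sdma_access_bo,
		then swap(src_addr, dst_addr) reverses the SDMA copy direction

This keeps the CPU off the slow MMIO aperture while a full-page debugger access (e.g. via ptrace reading /proc/<pid>/mem into a mapped BO) is serviced.
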
 /**
  * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
  *
@@ -1457,6 +1502,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
        if (bo->resource->mem_type != TTM_PL_VRAM)
                return -EIO;
 
+       if (amdgpu_device_has_timeouts_enabled(adev) &&
+                       !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
+               return len;
+
        amdgpu_res_first(bo->resource, offset, len, &cursor);
        while (cursor.remaining) {
                size_t count, size = cursor.size;
@@ -1797,6 +1846,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
                return r;
        }
 
+       if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_GTT,
+                               &adev->mman.sdma_access_bo, NULL,
+                               &adev->mman.sdma_access_ptr))
+               DRM_WARN("Debug VRAM access will use slowpath MM access\n");
+
        return 0;
 }
 
@@ -1818,6 +1873,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
        if (adev->mman.stolen_reserved_size)
                amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
                                      NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
+                                       &adev->mman.sdma_access_ptr);
        amdgpu_ttm_fw_reserve_vram_fini(adev);
 
        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
@@ -1888,23 +1945,55 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
        adev->mman.buffer_funcs_enabled = enable;
 }
 
+static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
+                                 bool direct_submit,
+                                 unsigned int num_dw,
+                                 struct dma_resv *resv,
+                                 bool vm_needs_flush,
+                                 struct amdgpu_job **job)
+{
+       enum amdgpu_ib_pool_type pool = direct_submit ?
+               AMDGPU_IB_POOL_DIRECT :
+               AMDGPU_IB_POOL_DELAYED;
+       int r;
+
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+       if (r)
+               return r;
+
+       if (vm_needs_flush) {
+               (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+                                                       adev->gmc.pdb0_bo :
+                                                       adev->gart.bo);
+               (*job)->vm_needs_flush = true;
+       }
+       if (resv) {
+               r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
+                                    AMDGPU_SYNC_ALWAYS,
+                                    AMDGPU_FENCE_OWNER_UNDEFINED);
+               if (r) {
+                       DRM_ERROR("sync failed (%d).\n", r);
+                       amdgpu_job_free(*job);
+                       return r;
+               }
+       }
+       return 0;
+}
+
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
                       struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush, bool tmz)
 {
-       enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
-               AMDGPU_IB_POOL_DELAYED;
        struct amdgpu_device *adev = ring->adev;
+       unsigned num_loops, num_dw;
        struct amdgpu_job *job;
-
        uint32_t max_bytes;
-       unsigned num_loops, num_dw;
        unsigned i;
        int r;
 
-       if (direct_submit && !ring->sched.ready) {
+       if (!direct_submit && !ring->sched.ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }
@@ -1912,26 +2001,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
+       r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
+                                  resv, vm_needs_flush, &job);
        if (r)
                return r;
 
-       if (vm_needs_flush) {
-               job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
-                                       adev->gmc.pdb0_bo : adev->gart.bo);
-               job->vm_needs_flush = true;
-       }
-       if (resv) {
-               r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_SYNC_ALWAYS,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
-               if (r) {
-                       DRM_ERROR("sync failed (%d).\n", r);
-                       goto error_free;
-               }
-       }
-
        for (i = 0; i < num_loops; i++) {
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
@@ -1961,77 +2035,35 @@ error_free:
        return r;
 }
 
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-                      uint32_t src_data,
-                      struct dma_resv *resv,
-                      struct dma_fence **fence)
+static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
+                              uint64_t dst_addr, uint32_t byte_count,
+                              struct dma_resv *resv,
+                              struct dma_fence **fence,
+                              bool vm_needs_flush)
 {
-       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-       uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
-       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-
-       struct amdgpu_res_cursor cursor;
+       struct amdgpu_device *adev = ring->adev;
        unsigned int num_loops, num_dw;
-       uint64_t num_bytes;
-
        struct amdgpu_job *job;
+       uint32_t max_bytes;
+       unsigned int i;
        int r;
 
-       if (!adev->mman.buffer_funcs_enabled) {
-               DRM_ERROR("Trying to clear memory with ring turned off.\n");
-               return -EINVAL;
-       }
-
-       if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
-               DRM_ERROR("Trying to clear preemptible memory.\n");
-               return -EINVAL;
-       }
-
-       if (bo->tbo.resource->mem_type == TTM_PL_TT) {
-               r = amdgpu_ttm_alloc_gart(&bo->tbo);
-               if (r)
-                       return r;
-       }
-
-       num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
-       num_loops = 0;
-
-       amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
-       while (cursor.remaining) {
-               num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
-               amdgpu_res_next(&cursor, cursor.size);
-       }
-       num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
-
-       /* for IB padding */
-       num_dw += 64;
-
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
-                                    &job);
+       max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+       num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
+       num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
+       r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
+                                  &job);
        if (r)
                return r;
 
-       if (resv) {
-               r = amdgpu_sync_resv(adev, &job->sync, resv,
-                                    AMDGPU_SYNC_ALWAYS,
-                                    AMDGPU_FENCE_OWNER_UNDEFINED);
-               if (r) {
-                       DRM_ERROR("sync failed (%d).\n", r);
-                       goto error_free;
-               }
-       }
-
-       amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
-       while (cursor.remaining) {
-               uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
-               uint64_t dst_addr = cursor.start;
+       for (i = 0; i < num_loops; i++) {
+               uint32_t cur_size = min(byte_count, max_bytes);
 
-               dst_addr += amdgpu_ttm_domain_start(adev,
-                                                   bo->tbo.resource->mem_type);
                amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
                                        cur_size);
 
-               amdgpu_res_next(&cursor, cur_size);
+               dst_addr += cur_size;
+               byte_count -= cur_size;
        }
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -2048,6 +2080,55 @@ error_free:
        return r;
 }
 
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+                       uint32_t src_data,
+                       struct dma_resv *resv,
+                       struct dma_fence **f)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+       struct dma_fence *fence = NULL;
+       struct amdgpu_res_cursor dst;
+       int r;
+
+       if (!adev->mman.buffer_funcs_enabled) {
+               DRM_ERROR("Trying to clear memory with ring turned off.\n");
+               return -EINVAL;
+       }
+
+       amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
+
+       mutex_lock(&adev->mman.gtt_window_lock);
+       while (dst.remaining) {
+               struct dma_fence *next;
+               uint64_t cur_size, to;
+
+               /* Never fill more than 256MiB at once to avoid timeouts */
+               cur_size = min(dst.size, 256ULL << 20);
+
+               r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
+                                         1, ring, false, &cur_size, &to);
+               if (r)
+                       goto error;
+
+               r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
+                                       &next, true);
+               if (r)
+                       goto error;
+
+               dma_fence_put(fence);
+               fence = next;
+
+               amdgpu_res_next(&dst, cur_size);
+       }
+error:
+       mutex_unlock(&adev->mman.gtt_window_lock);
+       if (f)
+               *f = dma_fence_get(fence);
+       dma_fence_put(fence);
+       return r;
+}
+
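amdgpu_fill_buffer() is now a thin loop over the same GART-window machinery the copy path uses: each pass maps up to 256 MiB of the destination behind a window via amdgpu_ttm_map_buffer() (which may shrink cur_size) and hands the resulting contiguous GPU address to amdgpu_ttm_fill_mem(). That removes the old special cases for TT placements and fragmented resources, at the cost of serializing on gtt_window_lock, as the copy path already does.
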
 /**
  * amdgpu_ttm_evict_resources - evict memory buffers
  * @adev: amdgpu device object
index f8f48be16d80602bf080b61f57f0f32bca2b0498..0e4ecc77db3f44dcec63d2c34354b9a406ae54bc 100644 (file)
@@ -98,6 +98,10 @@ struct amdgpu_mman {
        u64             fw_vram_usage_size;
        struct amdgpu_bo        *fw_vram_usage_reserved_bo;
        void            *fw_vram_usage_va;
+
+       /* PAGE_SIZE'd BO for process memory r/w over SDMA. */
+       struct amdgpu_bo        *sdma_access_bo;
+       void                    *sdma_access_ptr;
 };
 
 struct amdgpu_copy_mem {
@@ -115,7 +119,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
 
 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
 
@@ -158,7 +162,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        struct dma_fence **fence);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
@@ -177,6 +181,8 @@ static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 #endif
 
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+                             uint64_t *user_addr);
 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
                              uint64_t addr, uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
index 7c2538db3cd52be04f86e8f47d8d0e0813833218..428f4df184d083a219b93437f2f756cb17bb35f6 100644 (file)
@@ -378,6 +378,7 @@ enum AMDGPU_UCODE_ID {
        AMDGPU_UCODE_ID_VCN0_RAM,
        AMDGPU_UCODE_ID_VCN1_RAM,
        AMDGPU_UCODE_ID_DMCUB,
+       AMDGPU_UCODE_ID_CAP,
        AMDGPU_UCODE_ID_MAXIMUM,
 };
 
index 46264a4002f78f9428d0f1000a34420bdc1f4ef5..ff7805beda384c3d730803a0bb466f3a54d18c15 100644 (file)
@@ -21,7 +21,7 @@
  *
  */
 
-#include "amdgpu_ras.h"
+#include "amdgpu.h"
 
 static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                void *ras_error_status,
@@ -33,14 +33,14 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
        int ret = 0;
 
        kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-       ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc));
+       ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
        if (ret == -EOPNOTSUPP) {
-               if (adev->umc.ras_funcs &&
-                   adev->umc.ras_funcs->query_ras_error_count)
-                   adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
+               if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+                   adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+                   adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
 
-               if (adev->umc.ras_funcs &&
-                   adev->umc.ras_funcs->query_ras_error_address &&
+               if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+                   adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
                    adev->umc.max_ras_err_cnt_per_query) {
                        err_data->err_addr =
                                kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -56,15 +56,15 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                        /* umc query_ras_error_address is also responsible for clearing
                         * error status
                         */
-                       adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
+                       adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
                }
        } else if (!ret) {
-               if (adev->umc.ras_funcs &&
-                   adev->umc.ras_funcs->ecc_info_query_ras_error_count)
-                   adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status);
+               if (adev->umc.ras &&
+                   adev->umc.ras->ecc_info_query_ras_error_count)
+                   adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
 
-               if (adev->umc.ras_funcs &&
-                   adev->umc.ras_funcs->ecc_info_query_ras_error_address &&
+               if (adev->umc.ras &&
+                   adev->umc.ras->ecc_info_query_ras_error_address &&
                    adev->umc.max_ras_err_cnt_per_query) {
                        err_data->err_addr =
                                kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -80,7 +80,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                        /* umc query_ras_error_address is also responsible for clearing
                         * error status
                         */
-                       adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status);
+                       adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
                }
        }
 
@@ -96,8 +96,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                                                err_data->err_addr_cnt);
                        amdgpu_ras_save_bad_pages(adev);
 
-                       if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
-                               adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
+                       amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
                }
 
                if (reset)
@@ -137,7 +136,7 @@ static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
        return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
 }
 
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        int r;
        struct ras_fs_if fs_info = {
@@ -173,9 +172,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
        }
 
        /* ras init of specific umc version */
-       if (adev->umc.ras_funcs &&
-           adev->umc.ras_funcs->err_cnt_init)
-               adev->umc.ras_funcs->err_cnt_init(adev);
+       if (adev->umc.ras &&
+           adev->umc.ras->err_cnt_init)
+               adev->umc.ras->err_cnt_init(adev);
 
        return 0;
 
@@ -219,3 +218,24 @@ int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
        amdgpu_ras_interrupt_dispatch(adev, &ih_data);
        return 0;
 }
+
+void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+               uint64_t err_addr,
+               uint64_t retired_page,
+               uint32_t channel_index,
+               uint32_t umc_inst)
+{
+       struct eeprom_table_record *err_rec =
+               &err_data->err_addr[err_data->err_addr_cnt];
+
+       err_rec->address = err_addr;
+       /* page frame address is saved */
+       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+       err_rec->ts = (uint64_t)ktime_get_real_seconds();
+       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+       err_rec->cu = 0;
+       err_rec->mem_channel = channel_index;
+       err_rec->mcumc_id = umc_inst;
+
+       err_data->err_addr_cnt++;
+}
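
The helper centralizes the eeprom_table_record bookkeeping that the MCA notifier in amdgpu_ras.c used to open-code: callers just provide a sufficiently sized err_data->err_addr array and the helper appends one record and bumps err_addr_cnt. Sketch of a single-record caller, mirroring the notifier:

	/* Sketch: append one retired page; the address fields come from the
	 * decoded MCA bank, as in amdgpu_bad_page_notifier() above. */
	struct eeprom_table_record err_rec;
	struct ras_err_data err_data = {
		.err_addr = &err_rec,
		.err_addr_cnt = 0,
	};

	amdgpu_umc_fill_error_record(&err_data, err_addr, retired_page,
				     channel_index, umc_inst);
	/* err_data.err_addr_cnt is now 1; pass err_data.err_addr on to
	 * amdgpu_ras_add_bad_pages() as before */
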
index b72194e8bfe5310d2398d8feca0e41263bb3145d..4db0526d0be49f14b96c551f0e6337f9ccdf5c76 100644 (file)
@@ -20,6 +20,7 @@
  */
 #ifndef __AMDGPU_UMC_H__
 #define __AMDGPU_UMC_H__
+#include "amdgpu_ras.h"
 
 /*
  * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
 #define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
 
-struct amdgpu_umc_ras_funcs {
+struct amdgpu_umc_ras {
+       struct amdgpu_ras_block_object ras_block;
        void (*err_cnt_init)(struct amdgpu_device *adev);
-       int (*ras_late_init)(struct amdgpu_device *adev);
-       void (*ras_fini)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-       void (*query_ras_error_address)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
        bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
        void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
                                      void *ras_error_status);
@@ -73,10 +69,10 @@ struct amdgpu_umc {
        struct ras_common_if *ras_if;
 
        const struct amdgpu_umc_funcs *funcs;
-       const struct amdgpu_umc_ras_funcs *ras_funcs;
+       struct amdgpu_umc_ras *ras;
 };
 
-int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info);
 void amdgpu_umc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
                void *ras_error_status,
@@ -84,4 +80,9 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
 int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
                struct amdgpu_irq_src *source,
                struct amdgpu_iv_entry *entry);
+void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+               uint64_t err_addr,
+               uint64_t retired_page,
+               uint32_t channel_index,
+               uint32_t umc_inst);
 #endif
index 6f8de11a17f12e3cde1f2a16fa25ecae2b914bbf..9cc23b220537eb888f1d840e638f3a60b3ce711f 100644 (file)
@@ -834,6 +834,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
        handle = msg[2];
 
        if (handle == 0) {
+               amdgpu_bo_kunmap(bo);
                DRM_ERROR("Invalid UVD handle!\n");
                return -EINVAL;
        }
@@ -892,6 +893,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
        }
 
+       amdgpu_bo_kunmap(bo);
        return -EINVAL;
 }
 
index 07bc0f5047130fc7861f5aa90664cad080674356..e1288901beb6ffb60bde6e16f2ef539f0145aa3c 100644 (file)
@@ -820,3 +820,148 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
                }
        }
 }
+
+static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+                                                u32 acc_flags, u32 hwip,
+                                                bool write, u32 *rlcg_flag)
+{
+       bool ret = false;
+
+       switch (hwip) {
+       case GC_HWIP:
+               if (amdgpu_sriov_reg_indirect_gc(adev)) {
+                       *rlcg_flag =
+                               write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
+                       ret = true;
+               /* only in newer versions are AMDGPU_REGS_NO_KIQ and
+                * AMDGPU_REGS_RLC set simultaneously */
+               } else if ((acc_flags & AMDGPU_REGS_RLC) &&
+                          !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
+                       *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
+                       ret = true;
+               }
+               break;
+       case MMHUB_HWIP:
+               if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+                   (acc_flags & AMDGPU_REGS_RLC) && write) {
+                       *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
+                       ret = true;
+               }
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+       struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+       uint32_t timeout = 50000;
+       uint32_t i, tmp;
+       uint32_t ret = 0;
+       void __iomem *scratch_reg0;
+       void __iomem *scratch_reg1;
+       void __iomem *scratch_reg2;
+       void __iomem *scratch_reg3;
+       void __iomem *spare_int = NULL;
+
+       if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+               dev_err(adev->dev,
+                       "indirect registers access through rlcg is not available\n");
+               return 0;
+       }
+
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
+       scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+       scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+       scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+       if (reg_access_ctrl->spare_int)
+               spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+
+       if (offset == reg_access_ctrl->grbm_cntl) {
+               /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
+               writel(v, scratch_reg2);
+               writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+       } else if (offset == reg_access_ctrl->grbm_idx) {
+               /* if the target reg offset is grbm_idx, write to scratch_reg3 */
+               writel(v, scratch_reg3);
+               writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+       } else {
+               /*
+                * SCRATCH_REG0         = read/write value
+                * SCRATCH_REG1[30:28]  = command
+                * SCRATCH_REG1[19:0]   = address in dword
+                * SCRATCH_REG1[26:24]  = Error reporting
+                */
+               writel(v, scratch_reg0);
+               writel((offset | flag), scratch_reg1);
+               if (reg_access_ctrl->spare_int)
+                       writel(1, spare_int);
+
+               for (i = 0; i < timeout; i++) {
+                       tmp = readl(scratch_reg1);
+                       if (!(tmp & flag))
+                               break;
+                       udelay(10);
+               }
+
+               if (i >= timeout) {
+                       if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
+                               if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
+                                       dev_err(adev->dev,
+                                               "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
+                               } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
+                                       dev_err(adev->dev,
+                                               "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
+                               } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
+                                       dev_err(adev->dev,
+                                               "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
+                               } else {
+                                       dev_err(adev->dev,
+                                               "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
+                               }
+                       } else {
+                               dev_err(adev->dev,
+                                       "timeout: rlcg failed to program reg: 0x%05x\n", offset);
+                       }
+               }
+       }
+
+       ret = readl(scratch_reg0);
+       return ret;
+}
+
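Condensed, the handshake amdgpu_virt_rlcg_reg_rw() implements: the payload goes into SCRATCH_REG0, the dword address OR'd with one of the AMDGPU_RLCG_* command flags goes into SCRATCH_REG1, the spare interrupt acts as a doorbell to the RLCG firmware, and the driver polls until the firmware clears the command bits (or reports one of the error bits defined in amdgpu_virt.h below). One indirect GC write of value v to dword offset reg therefore encodes as:

	writel(v, scratch_reg0);			  /* payload */
	writel(reg | AMDGPU_RLCG_GC_WRITE, scratch_reg1); /* address + command */
	writel(1, spare_int);				  /* ring the doorbell */
	/* poll scratch_reg1 until the firmware clears AMDGPU_RLCG_GC_WRITE */
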
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+                      u32 offset, u32 value,
+                      u32 acc_flags, u32 hwip)
+{
+       u32 rlcg_flag;
+
+       if (!amdgpu_sriov_runtime(adev) &&
+           amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
+               amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+               return;
+       }
+
+       if (acc_flags & AMDGPU_REGS_NO_KIQ)
+               WREG32_NO_KIQ(offset, value);
+       else
+               WREG32(offset, value);
+}
+
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+                     u32 offset, u32 acc_flags, u32 hwip)
+{
+       u32 rlcg_flag;
+
+       if (!amdgpu_sriov_runtime(adev) &&
+           amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
+               return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+
+       if (acc_flags & AMDGPU_REGS_NO_KIQ)
+               return RREG32_NO_KIQ(offset);
+       else
+               return RREG32(offset);
+}
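
IP code on a VF is expected to funnel privileged register access through these two wrappers rather than calling RLC callbacks directly. A hedged sketch of how a register-write macro might route through them -- the macro is illustrative, not the driver's actual WREG32_SOC15 plumbing, and it assumes an adev in scope:

	#define EXAMPLE_WREG32(ip, reg, value)				\
		do {							\
			if (amdgpu_sriov_vf(adev))			\
				amdgpu_sriov_wreg(adev, reg, value,	\
						  0, ip##_HWIP);	\
			else						\
				WREG32(reg, value);			\
		} while (0)
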
index 9adfb8d63280ab5ddf2d399a641e1d1c52855583..645093610aa0402fb6dc0bdb4b1b59e0b8aa9e9b 100644 (file)
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
 
+/* flags for indirect register access path supported by rlcg for sriov */
+#define AMDGPU_RLCG_GC_WRITE_LEGACY    (0x8 << 28)
+#define AMDGPU_RLCG_GC_WRITE           (0x0 << 28)
+#define AMDGPU_RLCG_GC_READ            (0x1 << 28)
+#define AMDGPU_RLCG_MMHUB_WRITE        (0x2 << 28)
+
+/* error code for indirect register access path supported by rlcg for sriov */
+#define AMDGPU_RLCG_VFGATE_DISABLED            0x4000000
+#define AMDGPU_RLCG_WRONG_OPERATION_TYPE       0x2000000
+#define AMDGPU_RLCG_REG_NOT_IN_RANGE           0x1000000
+
 /* all asic after AI use this offset */
 #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
 /* tonga/fiji use this offset */
@@ -275,13 +286,18 @@ struct amdgpu_video_codec_info;
 (amdgpu_sriov_vf((adev)) && \
        ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
 
+#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
+        (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 
 static inline bool is_virtual_machine(void)
 {
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86)
        return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#elif defined(CONFIG_ARM64)
+       return !is_kernel_in_hyp_mode();
 #else
        return false;
 #endif
@@ -293,7 +309,6 @@ static inline bool is_virtual_machine(void)
        ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
 #define amdgpu_sriov_is_normal(adev) \
        ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
-
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
@@ -321,4 +336,9 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
 void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
                        struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
                        struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+                      u32 offset, u32 value,
+                      u32 acc_flags, u32 hwip);
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+                     u32 offset, u32 acc_flags, u32 hwip);
 #endif
index b37fc7d7d2c76392ad7cfd9f3d6c6cc7948b7622..37acd8911168138c9cd17a9b81c6b7015cb2ace9 100644 (file)
@@ -375,6 +375,8 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
        if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
                return;
 
+       dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+
        vm->bulk_moveable = false;
        if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
                amdgpu_vm_bo_relocated(base);
@@ -1634,7 +1636,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                        nptes = max(nptes, 1u);
 
                        trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
-                                                   nptes, dst, incr, upd_flags,
+                                                   min(nptes, 32u), dst, incr, upd_flags,
                                                    vm->task_info.pid,
                                                    vm->immediate.fence_context);
                        amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
@@ -2257,6 +2259,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        if (!bo)
                return bo_va;
 
+       dma_resv_assert_held(bo->tbo.base.resv);
        if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
                bo_va->is_xgmi = true;
                /* Power up XGMI if it can be potentially used */
@@ -2634,7 +2637,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 }
 
 /**
- * amdgpu_vm_bo_rmv - remove a bo to a specific vm
+ * amdgpu_vm_bo_del - remove a bo from a specific vm
  *
  * @adev: amdgpu_device pointer
  * @bo_va: requested bo_va
@@ -2643,7 +2646,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
  *
  * Object has to be reserved!
  */
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+void amdgpu_vm_bo_del(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va)
 {
        struct amdgpu_bo_va_mapping *mapping, *next;
@@ -2651,7 +2654,10 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
        struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_vm_bo_base **base;
 
+       dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+
        if (bo) {
+               dma_resv_assert_held(bo->tbo.base.resv);
                if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
                        vm->bulk_moveable = false;
 
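
The dma_resv_assert_held() calls added here make the locking contract explicit rather than changing behavior: they compile away without lockdep and otherwise verify that the caller reserved the BO (and, for removal, the VM root) before the bo_base lists are touched. A sketch of the expected calling pattern, using the existing reserve helpers (the wrapper function is illustrative):

        /* Illustrative caller: reserve the BO before adding a bo_va. */
        static int example_add_bo_va(struct amdgpu_device *adev,
                                     struct amdgpu_vm *vm, struct amdgpu_bo *bo)
        {
                struct amdgpu_bo_va *bo_va;
                int r;

                r = amdgpu_bo_reserve(bo, true);
                if (r)
                        return r;

                bo_va = amdgpu_vm_bo_add(adev, vm, bo); /* assertion is satisfied */
                amdgpu_bo_unreserve(bo);
                return bo_va ? 0 : -ENOMEM;
        }
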
index 85fcfb8c5efd1be3e16d4e0dc88ca26c900c44cc..a40a6a993bb029434db72731728f46994e556e94 100644 (file)
@@ -435,7 +435,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
                                                         uint64_t addr);
 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+void amdgpu_vm_bo_del(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
index 7442095f089c72856c9cde84e36ffe3030fbe59a..fce9a13a6ba1cdecdb2c5a5f89422ea599557343 100644 (file)
@@ -281,7 +281,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
        rsv->mm_node.size = size >> PAGE_SHIFT;
 
        spin_lock(&mgr->lock);
-       list_add_tail(&mgr->reservations_pending, &rsv->node);
+       list_add_tail(&rsv->node, &mgr->reservations_pending);
        amdgpu_vram_mgr_do_reserve(&mgr->manager);
        spin_unlock(&mgr->lock);
 
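
The one-line change above fixes swapped arguments: list_add_tail(new, head) takes the entry first and the list head second, so the old call spliced the manager's list head onto the reservation's node instead of queueing the reservation on the pending list. A minimal reminder of the argument order (the struct is illustrative):

        #include <linux/list.h>

        struct example_rsv {
                struct list_head node;
        };

        static void example_queue(struct list_head *pending, struct example_rsv *rsv)
        {
                /* entry first, list head second */
                list_add_tail(&rsv->node, pending);
        }
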
index e8b8f28c2f723bc06dcd16d49489eb41c164a672..5929d6f528c9607807412da8a6f96b01e3e19a19 100644 (file)
@@ -732,7 +732,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
        return psp_xgmi_terminate(&adev->psp);
 }
 
-static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        int r;
        struct ras_ih_if ih_info = {
@@ -746,7 +746,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
-       adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
+       adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
 
        if (!adev->gmc.xgmi.ras_if) {
                adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -865,7 +865,7 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
        return 0;
 }
 
-static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                                             void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -874,7 +874,7 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
        uint32_t ue_cnt = 0, ce_cnt = 0;
 
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
-               return -EINVAL;
+               return;
 
        err_data->ue_count = 0;
        err_data->ce_count = 0;
@@ -940,17 +940,51 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                break;
        }
 
-       adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
+       adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev);
 
        err_data->ue_count += ue_cnt;
        err_data->ce_count += ce_cnt;
+}
 
-       return 0;
+/* Trigger XGMI/WAFL error */
+static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if)
+{
+       int ret = 0;
+       struct ta_ras_trigger_error_input *block_info =
+                               (struct ta_ras_trigger_error_input *)inject_if;
+
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+               dev_warn(adev->dev, "Failed to disallow df cstate");
+
+       if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+               dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+       ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+       if (amdgpu_ras_intr_triggered())
+               return ret;
+
+       if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+               dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+       if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+               dev_warn(adev->dev, "Failed to allow df cstate");
+
+       return ret;
 }
 
-const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
-       .ras_late_init = amdgpu_xgmi_ras_late_init,
-       .ras_fini = amdgpu_xgmi_ras_fini,
+struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
        .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
        .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+       .ras_error_inject = amdgpu_ras_error_inject_xgmi,
+};
+
+struct amdgpu_xgmi_ras xgmi_ras = {
+       .ras_block = {
+               .name = "xgmi",
+               .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+               .hw_ops = &xgmi_ras_hw_ops,
+               .ras_late_init = amdgpu_xgmi_ras_late_init,
+               .ras_fini = amdgpu_xgmi_ras_fini,
+       },
 };
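
This hunk shows the shape of the RAS rework running through the whole series: the per-IP *_ras_funcs tables are replaced by a block object embedded in a small per-IP wrapper, with the hardware accessors grouped under hw_ops and the lifecycle hooks (ras_late_init/ras_fini) on the block itself, so common code can drive any registered block through one vtable. A sketch of that generic dispatch, assuming the embedded type is named amdgpu_ras_block_object as in the initializers above (the function is illustrative; the real iteration lives in amdgpu_ras.c):

        /* Illustrative: query one registered block through the uniform vtable. */
        static void example_query_block(struct amdgpu_device *adev,
                                        struct amdgpu_ras_block_object *obj,
                                        struct ras_err_data *err_data)
        {
                if (obj->hw_ops && obj->hw_ops->query_ras_error_count)
                        obj->hw_ops->query_ras_error_count(adev, err_data);
        }
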
index d2189bf7d428ef7781a3c2b02a2a4e22b0d4963c..0afca51c3c0c939249ac728398546e4d8b4bcb12 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <drm/task_barrier.h>
 #include "amdgpu_psp.h"
-
+#include "amdgpu_ras.h"
 
 struct amdgpu_hive_info {
        struct kobject kobj;
@@ -50,7 +50,7 @@ struct amdgpu_pcs_ras_field {
        uint32_t pcs_err_shift;
 };
 
-extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs;
+extern struct amdgpu_xgmi_ras xgmi_ras;
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
 void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
index 3ea5578643200f2d89d72938f05f9b982c996e96..88642e7ecdf40254b73a5dd0659dbac3c435b3be 100644 (file)
@@ -68,12 +68,13 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       switch (adev->asic_type) {
-       case CHIP_VEGA10:
-       case CHIP_VEGA12:
-       case CHIP_VEGA20:
-       case CHIP_RAVEN:
-       case CHIP_RENOIR:
+       switch (adev->ip_versions[ATHUB_HWIP][0]) {
+       case IP_VERSION(9, 0, 0):
+       case IP_VERSION(9, 1, 0):
+       case IP_VERSION(9, 2, 0):
+       case IP_VERSION(9, 3, 0):
+       case IP_VERSION(9, 4, 0):
+       case IP_VERSION(1, 5, 0):
                athub_update_medium_grain_clock_gating(adev,
                                state == AMD_CG_STATE_GATE);
                athub_update_medium_grain_light_sleep(adev,
index ab6a07e5e8c4bccd639f58cee9a142683b2305db..a720436857b473986fa5f0b6d7b27abd97c92f01 100644 (file)
@@ -78,6 +78,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
                return 0;
 
        switch (adev->ip_versions[ATHUB_HWIP][0]) {
+       case IP_VERSION(1, 3, 1):
        case IP_VERSION(2, 0, 0):
        case IP_VERSION(2, 0, 2):
                athub_v2_0_update_medium_grain_clock_gating(adev,
index 2edefd10e56ce140f92e3a484db3330662e0ef39..ad8e87d3d2cb115365c0597fb47f768cf9478901 100644 (file)
@@ -74,6 +74,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
        case IP_VERSION(2, 1, 0):
        case IP_VERSION(2, 1, 1):
        case IP_VERSION(2, 1, 2):
+       case IP_VERSION(2, 4, 0):
                athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
                athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
                break;
index d1570a462a51a95088fbbbde4ad27be74f3b0465..5d5205870861cadea01f8cfeaef8b27b2750a8a6 100644 (file)
@@ -2532,7 +2532,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                break;
        }
        /* adjust pm to dpms */
-       amdgpu_pm_compute_clocks(adev);
+       amdgpu_dpm_compute_clocks(adev);
 }
 
 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
index 18a7b3bd633b5d06198b129ceb25e7ef3677a3ef..4d812b22c54f8dea522e37dd1a87b22857dde441 100644 (file)
@@ -2608,7 +2608,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                break;
        }
        /* adjust pm to dpms */
-       amdgpu_pm_compute_clocks(adev);
+       amdgpu_dpm_compute_clocks(adev);
 }
 
 static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
index c7803dc2b2d533d295ec4b0f65db99da92b90f7d..b90bc2adf77843aa99e4d3513e6c7b9713be474e 100644 (file)
@@ -2424,7 +2424,7 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                break;
        }
        /* adjust pm to dpms */
-       amdgpu_pm_compute_clocks(adev);
+       amdgpu_dpm_compute_clocks(adev);
 }
 
 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
index 8318ee8339f1b9af4165b5f7c5a09590be608585..7c1379b02f943df91c502a5c6f1dc8df33fd54ae 100644 (file)
@@ -2433,7 +2433,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                break;
        }
        /* adjust pm to dpms */
-       amdgpu_pm_compute_clocks(adev);
+       amdgpu_dpm_compute_clocks(adev);
 }
 
 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
index 43c5e3ec9a39e8c59fd4fdc4ad72adc7932349f3..f4dfca013ec5122b66d3062822cca8673ff902cf 100644 (file)
@@ -458,7 +458,7 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev,
 
 #define DEFERRED_ARM_MASK      (1 << 31)
 static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
-                                   int counter_idx, uint64_t config,
+                                   uint64_t config, int counter_idx,
                                    bool is_deferred)
 {
 
@@ -476,8 +476,8 @@ static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev,
 }
 
 static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev,
-                                   int counter_idx,
-                                   uint64_t config)
+                                   uint64_t config,
+                                   int counter_idx)
 {
        return  (df_v3_6_pmc_has_counter(adev, config, counter_idx) &&
                        (adev->df_perfmon_config_assign_mask[counter_idx]
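
The parameter reorder above is a correctness fix, not churn: it makes both deferred-counter helpers take (config, counter_idx) in the same order, presumably to match their call sites; with the old prototypes a counter index could be consumed as the config value without any compiler complaint, since both arguments convert silently. An illustrative caller after the change (the helpers are static, so such a caller would live in df_v3_6.c):

        /* Illustrative: config (what to count) before counter_idx (which slot). */
        static void example_defer_counter(struct amdgpu_device *adev,
                                          uint64_t config, int counter_idx)
        {
                df_v3_6_pmc_set_deferred(adev, config, counter_idx, true);

                if (df_v3_6_pmc_is_deferred(adev, config, counter_idx))
                        dev_dbg(adev->dev, "counter %d deferred\n", counter_idx);
        }
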
index dbe7442fb25cc4968e4237f7dbef1fbd3eab1c03..8fb4528c741f9d27efb4a4e4a0d2ebad6892d960 100644 (file)
 #define GFX10_NUM_GFX_RINGS_Sienna_Cichlid     1
 #define GFX10_MEC_HPD_SIZE     2048
 
-#define RLCG_VFGATE_DISABLED   0x4000000
-#define RLCG_WRONG_OPERATION_TYPE      0x2000000
-#define RLCG_NOT_IN_RANGE      0x1000000
-
 #define F32_CE_PROGRAM_RAM_SIZE                65536
 #define RLCG_UCODE_LOADING_START_ADDRESS       0x00002000L
 
 #define mmRLC_SPARE_INT_0_Sienna_Cichlid               0x4ca5
 #define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX      1
 
-#define GFX_RLCG_GC_WRITE_OLD  (0x8 << 28)
-#define GFX_RLCG_GC_WRITE      (0x0 << 28)
-#define GFX_RLCG_GC_READ       (0x1 << 28)
-#define GFX_RLCG_MMHUB_WRITE   (0x2 << 28)
-
-#define RLCG_ERROR_REPORT_ENABLED(adev) \
-       (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
-
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -1463,143 +1451,6 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
-static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
-                                int write, u32 *rlcg_flag)
-{
-       switch (hwip) {
-       case GC_HWIP:
-               if (amdgpu_sriov_reg_indirect_gc(adev)) {
-                       *rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
-
-                       return true;
-               /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
-               } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
-                       *rlcg_flag = GFX_RLCG_GC_WRITE_OLD;
-
-                       return true;
-               }
-
-               break;
-       case MMHUB_HWIP:
-               if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
-                   (acc_flags & AMDGPU_REGS_RLC) && write) {
-                       *rlcg_flag = GFX_RLCG_MMHUB_WRITE;
-                       return true;
-               }
-
-               break;
-       default:
-               DRM_DEBUG("Not program register by RLCG\n");
-       }
-
-       return false;
-}
-
-static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
-{
-       static void *scratch_reg0;
-       static void *scratch_reg1;
-       static void *scratch_reg2;
-       static void *scratch_reg3;
-       static void *spare_int;
-       static uint32_t grbm_cntl;
-       static uint32_t grbm_idx;
-       uint32_t i = 0;
-       uint32_t retries = 50000;
-       u32 ret = 0;
-       u32 tmp;
-
-       scratch_reg0 = adev->rmmio +
-                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
-       scratch_reg1 = adev->rmmio +
-                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
-       scratch_reg2 = adev->rmmio +
-                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
-       scratch_reg3 = adev->rmmio +
-                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
-
-       if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
-               spare_int = adev->rmmio +
-                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
-                            + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
-       } else {
-               spare_int = adev->rmmio +
-                           (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
-       }
-
-       grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
-       grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
-
-       if (offset == grbm_cntl || offset == grbm_idx) {
-               if (offset  == grbm_cntl)
-                       writel(v, scratch_reg2);
-               else if (offset == grbm_idx)
-                       writel(v, scratch_reg3);
-
-               writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
-       } else {
-               writel(v, scratch_reg0);
-               writel(offset | flag, scratch_reg1);
-               writel(1, spare_int);
-
-               for (i = 0; i < retries; i++) {
-                       tmp = readl(scratch_reg1);
-                       if (!(tmp & flag))
-                               break;
-
-                       udelay(10);
-               }
-
-               if (i >= retries) {
-                       if (RLCG_ERROR_REPORT_ENABLED(adev)) {
-                               if (tmp & RLCG_VFGATE_DISABLED)
-                                       pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
-                               else if (tmp & RLCG_WRONG_OPERATION_TYPE)
-                                       pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
-                               else if (tmp & RLCG_NOT_IN_RANGE)
-                                       pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
-                               else
-                                       pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
-                       } else
-                               pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
-               }
-       }
-
-       ret = readl(scratch_reg0);
-
-       return ret;
-}
-
-static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
-{
-       u32 rlcg_flag;
-
-       if (!amdgpu_sriov_runtime(adev) &&
-           gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
-               gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
-               return;
-       }
-
-       if (acc_flags & AMDGPU_REGS_NO_KIQ)
-               WREG32_NO_KIQ(offset, value);
-       else
-               WREG32(offset, value);
-}
-
-static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
-{
-       u32 rlcg_flag;
-
-       if (!amdgpu_sriov_runtime(adev) &&
-           gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
-               return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
-
-       if (acc_flags & AMDGPU_REGS_NO_KIQ)
-               return RREG32_NO_KIQ(offset);
-       else
-               return RREG32(offset);
-}
-
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
 {
        /* Pending on emulation bring up */
@@ -3790,6 +3641,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
                break;
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_10_0_cyan_skillfish,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
@@ -3968,6 +3820,7 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
                if ((adev->gfx.me_fw_version >= 0x00000046) &&
                    (adev->gfx.me_feature_version >= 27) &&
                    (adev->gfx.pfp_fw_version >= 0x00000068) &&
@@ -4108,6 +3961,9 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
                else
                        chip_name = "cyan_skillfish";
                break;
+       case IP_VERSION(10, 1, 4):
+               chip_name = "cyan_skillfish2";
+               break;
        default:
                BUG();
        }
@@ -4448,6 +4304,30 @@ static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev)
                        (void **)&adev->gfx.rlc.cp_table_ptr);
 }
 
+static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+       struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
+       reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
+       reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
+       reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
+       reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
+       reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
+       switch (adev->ip_versions[GC_HWIP][0]) {
+       case IP_VERSION(10, 3, 0):
+               reg_access_ctrl->spare_int =
+                       SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid);
+               break;
+       default:
+               reg_access_ctrl->spare_int =
+                       SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
+               break;
+       }
+       adev->gfx.rlc.rlcg_reg_access_supported = true;
+}
+
 static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
 {
        const struct cs_section_def *cs_data;
@@ -4468,6 +4348,7 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
        if (adev->gfx.rlc.funcs->update_spm_vmid)
                adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
 
        return 0;
 }
 
@@ -4689,6 +4570,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
                        1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
                break;
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4801,6 +4683,7 @@ static int gfx_v10_0_sw_init(void *handle)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 1;
                adev->gfx.me.num_queue_per_pipe = 1;
@@ -4865,10 +4748,14 @@ static int gfx_v10_0_sw_init(void *handle)
        if (r)
                return r;
 
-       r = gfx_v10_0_rlc_init(adev);
-       if (r) {
-               DRM_ERROR("Failed to init rlc BOs!\n");
-               return r;
+       if (adev->gfx.rlc.funcs) {
+               if (adev->gfx.rlc.funcs->init) {
+                       r = adev->gfx.rlc.funcs->init(adev);
+                       if (r) {
+                               dev_err(adev->dev, "Failed to init rlc BOs!\n");
+                               return r;
+                       }
+               }
        }
 
        r = gfx_v10_0_mec_init(adev);
@@ -7778,6 +7665,7 @@ static int gfx_v10_0_early_init(void *handle)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
                adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
                break;
        case IP_VERSION(10, 3, 0):
@@ -7801,6 +7689,9 @@ static int gfx_v10_0_early_init(void *handle)
        gfx_v10_0_set_gds_init(adev);
        gfx_v10_0_set_rlc_funcs(adev);
 
+       /* init rlcg reg access ctrl */
+       gfx_v10_0_init_rlcg_reg_access_ctrl(adev);
+
        return 0;
 }
 
@@ -8377,8 +8268,6 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
        .reset = gfx_v10_0_rlc_reset,
        .start = gfx_v10_0_rlc_start,
        .update_spm_vmid = gfx_v10_0_update_spm_vmid,
-       .sriov_wreg = gfx_v10_sriov_wreg,
-       .sriov_rreg = gfx_v10_sriov_rreg,
        .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
 
@@ -9537,6 +9426,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
        case IP_VERSION(10, 1, 10):
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 4):
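
With gfx_v10_0_init_rlcg_reg_access_ctrl() publishing the scratch, GRBM and spare-interrupt offsets, the hand-rolled gfx_v10_rlcg_rw()/sriov_wreg()/sriov_rreg() trio deleted above becomes dead weight: the common amdgpu_virt_rlcg_reg_rw() can derive every MMIO address it needs from adev->gfx.rlc.reg_access_ctrl. A sketch of that idea (not the actual amdgpu_virt implementation):

        /* Illustrative: map a published dword offset to its MMIO address,
         * mirroring the arithmetic the deleted per-IP code did by hand. */
        static void __iomem *example_scratch0(struct amdgpu_device *adev)
        {
                struct amdgpu_rlcg_reg_access_ctrl *ctrl =
                        &adev->gfx.rlc.reg_access_ctrl;

                return adev->rmmio + ctrl->scratch_reg0 * 4;
        }
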
index 9189fb85a4dd4e7d83b50617d4988a5189ebde8e..744253be51426b5a6520bcb6568272e4d32ba0b8 100644 (file)
 #define mmGCEA_PROBE_MAP                        0x070c
 #define mmGCEA_PROBE_MAP_BASE_IDX               0
 
-#define GFX9_RLCG_GC_WRITE_OLD                 (0x8 << 28)
-#define GFX9_RLCG_GC_WRITE                     (0x0 << 28)
-#define GFX9_RLCG_GC_READ                      (0x1 << 28)
-#define GFX9_RLCG_VFGATE_DISABLED              0x4000000
-#define GFX9_RLCG_WRONG_OPERATION_TYPE         0x2000000
-#define GFX9_RLCG_NOT_IN_RANGE                 0x1000000
-
 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/vega10_me.bin");
@@ -746,128 +739,6 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
-static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
-{
-       static void *scratch_reg0;
-       static void *scratch_reg1;
-       static void *scratch_reg2;
-       static void *scratch_reg3;
-       static void *spare_int;
-       static uint32_t grbm_cntl;
-       static uint32_t grbm_idx;
-       uint32_t i = 0;
-       uint32_t retries = 50000;
-       u32 ret = 0;
-       u32 tmp;
-
-       scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
-       scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
-       scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4;
-       scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4;
-       spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
-
-       grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
-       grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
-
-       if (offset == grbm_cntl || offset == grbm_idx) {
-               if (offset  == grbm_cntl)
-                       writel(v, scratch_reg2);
-               else if (offset == grbm_idx)
-                       writel(v, scratch_reg3);
-
-               writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
-       } else {
-               /*
-                * SCRATCH_REG0         = read/write value
-                * SCRATCH_REG1[30:28]  = command
-                * SCRATCH_REG1[19:0]   = address in dword
-                * SCRATCH_REG1[26:24]  = Error reporting
-                */
-               writel(v, scratch_reg0);
-               writel(offset | flag, scratch_reg1);
-               writel(1, spare_int);
-
-               for (i = 0; i < retries; i++) {
-                       tmp = readl(scratch_reg1);
-                       if (!(tmp & flag))
-                               break;
-
-                       udelay(10);
-               }
-
-               if (i >= retries) {
-                       if (amdgpu_sriov_reg_indirect_gc(adev)) {
-                               if (tmp & GFX9_RLCG_VFGATE_DISABLED)
-                                       pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset);
-                               else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE)
-                                       pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset);
-                               else if (tmp & GFX9_RLCG_NOT_IN_RANGE)
-                                       pr_err("The register is not in range, program reg:0x%05x failed!\n", offset);
-                               else
-                                       pr_err("Unknown error type, program reg:0x%05x failed!\n", offset);
-                       } else
-                               pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
-               }
-       }
-
-       ret = readl(scratch_reg0);
-
-       return ret;
-}
-
-static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
-                               int write, u32 *rlcg_flag)
-{
-
-       switch (hwip) {
-       case GC_HWIP:
-               if (amdgpu_sriov_reg_indirect_gc(adev)) {
-                       *rlcg_flag = write ? GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ;
-
-                       return true;
-               /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
-               } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
-                       *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD;
-                       return true;
-               }
-
-               break;
-       default:
-               return false;
-       }
-
-       return false;
-}
-
-static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
-{
-       u32 rlcg_flag;
-
-       if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
-               return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag);
-
-       if (acc_flags & AMDGPU_REGS_NO_KIQ)
-               return RREG32_NO_KIQ(offset);
-       else
-               return RREG32(offset);
-}
-
-static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
-                              u32 value, u32 acc_flags, u32 hwip)
-{
-       u32 rlcg_flag;
-
-       if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
-               gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag);
-               return;
-       }
-
-       if (acc_flags & AMDGPU_REGS_NO_KIQ)
-               WREG32_NO_KIQ(offset, value);
-       else
-               WREG32(offset, value);
-}
-
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -882,7 +753,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
-static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);
@@ -2008,6 +1879,21 @@ static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
                return 4;
 }
 
+static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
+{
+       struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
+       reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
+       reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
+       reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
+       reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
+       reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
+       reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
+       adev->gfx.rlc.rlcg_reg_access_supported = true;
+}
+
 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 {
        const struct cs_section_def *cs_data;
@@ -2197,12 +2083,16 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
 };
 
-static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
-       .ras_late_init = amdgpu_gfx_ras_late_init,
-       .ras_fini = amdgpu_gfx_ras_fini,
-       .ras_error_inject = &gfx_v9_0_ras_error_inject,
-       .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
-       .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
+const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = {
+       .ras_error_inject = &gfx_v9_0_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+       .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
+};
+
+static struct amdgpu_gfx_ras gfx_v9_0_ras = {
+       .ras_block = {
+               .hw_ops = &gfx_v9_0_ras_ops,
+       },
 };
 
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2231,7 +2121,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                DRM_INFO("fix gfx.config for vega12\n");
                break;
        case IP_VERSION(9, 4, 0):
-               adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
+               adev->gfx.ras = &gfx_v9_0_ras;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2258,7 +2148,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                        gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case IP_VERSION(9, 4, 1):
-               adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
+               adev->gfx.ras = &gfx_v9_4_ras;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2279,7 +2169,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                gb_addr_config |= 0x22010042;
                break;
        case IP_VERSION(9, 4, 2):
-               adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
+               adev->gfx.ras = &gfx_v9_4_2_ras;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2298,6 +2188,25 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                break;
        }
 
+       if (adev->gfx.ras) {
+               err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
+               if (err) {
+                       DRM_ERROR("Failed to register gfx ras block!\n");
+                       return err;
+               }
+
+               strcpy(adev->gfx.ras->ras_block.name, "gfx");
+               adev->gfx.ras->ras_block.block = AMDGPU_RAS_BLOCK__GFX;
+
+               /* If the IP doesn't define its own ras_late_init, use the gfx default */
+               if (!adev->gfx.ras->ras_block.ras_late_init)
+                       adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
+
+               /* If the IP doesn't define its own ras_fini, use the gfx default */
+               if (!adev->gfx.ras->ras_block.ras_fini)
+                       adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini;
+       }
+
        adev->gfx.config.gb_addr_config = gb_addr_config;
 
        adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
@@ -2434,10 +2343,14 @@ static int gfx_v9_0_sw_init(void *handle)
                return r;
        }
 
-       r = adev->gfx.rlc.funcs->init(adev);
-       if (r) {
-               DRM_ERROR("Failed to init rlc BOs!\n");
-               return r;
+       if (adev->gfx.rlc.funcs) {
+               if (adev->gfx.rlc.funcs->init) {
+                       r = adev->gfx.rlc.funcs->init(adev);
+                       if (r) {
+                               dev_err(adev->dev, "Failed to init rlc BOs!\n");
+                               return r;
+                       }
+               }
        }
 
        r = gfx_v9_0_mec_init(adev);
@@ -2513,9 +2426,8 @@ static int gfx_v9_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->gfx.ras_funcs &&
-           adev->gfx.ras_funcs->ras_fini)
-               adev->gfx.ras_funcs->ras_fini(adev);
+       if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini)
+               adev->gfx.ras->ras_block.ras_fini(adev);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4840,6 +4752,9 @@ static int gfx_v9_0_early_init(void *handle)
        gfx_v9_0_set_gds_init(adev);
        gfx_v9_0_set_rlc_funcs(adev);
 
+       /* init rlcg reg access ctrl */
+       gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
+
        return 0;
 }
 
@@ -4870,16 +4785,15 @@ static int gfx_v9_0_ecc_late_init(void *handle)
        if (r)
                return r;
 
-       if (adev->gfx.ras_funcs &&
-           adev->gfx.ras_funcs->ras_late_init) {
-               r = adev->gfx.ras_funcs->ras_late_init(adev);
+       if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_late_init) {
+               r = adev->gfx.ras->ras_block.ras_late_init(adev, NULL);
                if (r)
                        return r;
        }
 
-       if (adev->gfx.ras_funcs &&
-           adev->gfx.ras_funcs->enable_watchdog_timer)
-               adev->gfx.ras_funcs->enable_watchdog_timer(adev);
+       if (adev->gfx.ras &&
+           adev->gfx.ras->enable_watchdog_timer)
+               adev->gfx.ras->enable_watchdog_timer(adev);
 
        return 0;
 }
@@ -5250,8 +5164,6 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
        .reset = gfx_v9_0_rlc_reset,
        .start = gfx_v9_0_rlc_start,
        .update_spm_vmid = gfx_v9_0_update_spm_vmid,
-       .sriov_wreg = gfx_v9_0_sriov_wreg,
-       .sriov_rreg = gfx_v9_0_sriov_rreg,
        .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
 };
 
@@ -6819,7 +6731,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
 }
 
-static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -6828,7 +6740,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
        uint32_t reg_value;
 
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-               return -EINVAL;
+               return;
 
        err_data->ue_count = 0;
        err_data->ce_count = 0;
@@ -6857,8 +6769,6 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
        mutex_unlock(&adev->grbm_idx_mutex);
 
        gfx_v9_0_query_utc_edc_status(adev, err_data);
-
-       return 0;
 }
 
 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
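
The int-to-void conversion of query_ras_error_count() reflects how the results were always consumed: the counts come back through the ras_err_data the caller passes in, and an unsupported block now simply leaves it untouched instead of returning an -EINVAL that nobody checked. An illustrative consumer after the change:

        /* Illustrative: collect gfx error counts through the new vtable. */
        static void example_collect_gfx_errors(struct amdgpu_device *adev)
        {
                struct ras_err_data err_data = {};
                const struct amdgpu_ras_block_hw_ops *ops =
                        adev->gfx.ras ? adev->gfx.ras->ras_block.hw_ops : NULL;

                if (ops && ops->query_ras_error_count)
                        ops->query_ras_error_count(adev, &err_data);

                dev_info(adev->dev, "gfx RAS: %lu ce, %lu ue\n",
                         err_data.ce_count, err_data.ue_count);
        }
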
index b4789dfc2bb959b7c60ec61bfcae6513873aacfb..c67e387a97f5c1c2c4949bca08affe6a3002bae9 100644 (file)
@@ -863,7 +863,7 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -872,7 +872,7 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
        uint32_t reg_value;
 
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-               return -EINVAL;
+               return;
 
        err_data->ue_count = 0;
        err_data->ce_count = 0;
@@ -903,7 +903,6 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
 
        gfx_v9_4_query_utc_edc_status(adev, err_data);
 
-       return 0;
 }
 
 static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
@@ -1029,11 +1028,16 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
-const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
-        .ras_late_init = amdgpu_gfx_ras_late_init,
-        .ras_fini = amdgpu_gfx_ras_fini,
-        .ras_error_inject = &gfx_v9_4_ras_error_inject,
-        .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
-        .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
-        .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = {
+       .ras_error_inject = &gfx_v9_4_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+       .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
+
+struct amdgpu_gfx_ras gfx_v9_4_ras = {
+       .ras_block = {
+               .hw_ops = &gfx_v9_4_ras_ops,
+       },
 };
index bdd16b568021c1135c36a2e3afc793e4fcfd6ad2..ca520a767267751bf3d1accac2dc2abaa2bc8bd8 100644 (file)
@@ -24,6 +24,6 @@
 #ifndef __GFX_V9_4_H__
 #define __GFX_V9_4_H__
 
-extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
+extern struct amdgpu_gfx_ras gfx_v9_4_ras;
 
 #endif /* __GFX_V9_4_H__ */
index c4f37a1618757086d457c82410f28ff388fd9048..7653ebd0e67bd8513e167f18e95cfe11992bc428 100644 (file)
@@ -1641,14 +1641,14 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
        return 0;
 }
 
-static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
                                            void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0, ded_count = 0;
 
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
-               return -EINVAL;
+               return;
 
        err_data->ue_count = 0;
        err_data->ce_count = 0;
@@ -1661,7 +1661,6 @@ static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
        err_data->ce_count += sec_count;
        err_data->ue_count += ded_count;
 
-       return 0;
 }
 
 static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
@@ -1931,13 +1930,17 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
-const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
-       .ras_late_init = amdgpu_gfx_ras_late_init,
-       .ras_fini = amdgpu_gfx_ras_fini,
-       .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
-       .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
-       .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
-       .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
-       .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
+       .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+       .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+       .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+};
+
+struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
+       .ras_block = {
+               .hw_ops = &gfx_v9_4_2_ras_ops,
+       },
        .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
 };
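
Note that enable_watchdog_timer deliberately stays outside ras_block: it is a gfx-specific quirk rather than part of the generic RAS vtable, so it sits on the amdgpu_gfx_ras wrapper and gfx_v9_0_ecc_late_init() (above) calls it directly. The implied wrapper layout, reconstructed from the initializers (the struct name below is illustrative, not the driver's):

        /* Reconstructed shape: generic block embedded, gfx-only hook beside it. */
        struct example_gfx_ras {
                struct amdgpu_ras_block_object ras_block;
                void (*enable_watchdog_timer)(struct amdgpu_device *adev);
        };
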
index 6db1f88509afd4566601c67e0ed84bf99487368b..7584624b641cae2bea918f74e8d741a66bf6664a 100644 (file)
@@ -31,6 +31,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
 void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
 int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
 
-extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs;
+extern struct amdgpu_gfx_ras gfx_v9_4_2_ras;
 
 #endif /* __GFX_V9_4_2_H__ */
index b4eddf6e98a6a23c861af2d4999d0c220d4a1e06..ff738e9725ee8fb15b1e18d6cb0b802c182c5cf5 100644 (file)
@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
                adev->gfx.config.max_sh_per_se *
                adev->gfx.config.max_shader_engines);
 
-       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+       switch (adev->ip_versions[GC_HWIP][0]) {
+       case IP_VERSION(10, 3, 1):
+       case IP_VERSION(10, 3, 3):
                /* Get SA disabled bitmap from eFuse setting */
                efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
                efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
                disabled_sa = tmp;
 
                WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+               break;
+       default:
+               break;
        }
 }
 
index 38bb42727715d26948e4cee9dde57506a2832b68..c64e3a391c991d8d19ff6893b9b9237ff5c131f3 100644 (file)
@@ -664,11 +664,25 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
                adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
-               adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
+               adev->umc.ras = &umc_v8_7_ras;
                break;
        default:
                break;
        }
+       if (adev->umc.ras) {
+               amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
+
+               strcpy(adev->umc.ras->ras_block.name, "umc");
+               adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
+
+               /* If the IP doesn't define its own ras_late_init, use the default */
+               if (!adev->umc.ras->ras_block.ras_late_init)
+                       adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+               /* If the IP doesn't define its own ras_fini, use the default */
+               if (!adev->umc.ras->ras_block.ras_fini)
+                       adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini;
+       }
 }
 
 
@@ -705,6 +719,7 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 
 static int gmc_v10_0_early_init(void *handle)
 {
+       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        gmc_v10_0_set_mmhub_funcs(adev);
@@ -720,6 +735,10 @@ static int gmc_v10_0_early_init(void *handle)
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
+       r = amdgpu_gmc_ras_early_init(adev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -862,6 +881,7 @@ static int gmc_v10_0_sw_init(void *handle)
        case IP_VERSION(10, 1, 1):
        case IP_VERSION(10, 1, 2):
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 3, 0):
        case IP_VERSION(10, 3, 2):
        case IP_VERSION(10, 3, 1):
@@ -986,14 +1006,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
-               goto skip_pin_bo;
-
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
-skip_pin_bo:
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
@@ -1019,8 +1032,6 @@ skip_pin_bo:
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-       adev->gart.ready = true;
-
        return 0;
 }
 
@@ -1043,6 +1054,12 @@ static int gmc_v10_0_hw_init(void *handle)
        if (r)
                return r;
 
+       if (amdgpu_emu_mode == 1) {
+               r = amdgpu_gmc_vram_checking(adev);
+               if (r)
+                       return r;
+       }
+
        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);
 
@@ -1140,6 +1157,10 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3) ||
+           adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
+               return;
+
        adev->mmhub.funcs->get_clockgating(adev, flags);
 
        if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
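
The registration stanza added earlier in this file (and repeated for UMC and MMHUB in gmc_v9_0.c below) follows one pattern: register the block, name it, and fall back to the subsystem defaults for any lifecycle hook the IP does not override. A hypothetical helper capturing what the driver currently open-codes per block:

        /* Hypothetical factoring of the repeated registration pattern;
         * the return value of the register call is ignored, as in the
         * gmc code above. */
        static void example_register_umc_ras(struct amdgpu_device *adev)
        {
                struct amdgpu_ras_block_object *blk = &adev->umc.ras->ras_block;

                amdgpu_ras_register_ras_block(adev, blk);
                strcpy(blk->name, "umc");
                blk->block = AMDGPU_RAS_BLOCK__UMC;

                if (!blk->ras_late_init)
                        blk->ras_late_init = amdgpu_umc_ras_late_init;
                if (!blk->ras_fini)
                        blk->ras_fini = amdgpu_umc_ras_fini;
        }
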
index cd6c38e083d0d28d2f1e408337dac0a58e24e6e9..ec291d28edffd882aefb03f9fd6ee2ce54dd5f2c 100644 (file)
@@ -469,16 +469,14 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
@@ -558,7 +556,6 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
@@ -922,7 +919,10 @@ static int gmc_v6_0_hw_init(void *handle)
        if (r)
                return r;
 
-       return r;
+       if (amdgpu_emu_mode == 1)
+               return amdgpu_gmc_vram_checking(adev);
+       else
+               return r;
 }
 
 static int gmc_v6_0_hw_fini(void *handle)
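
The gmc hw_init paths in this series (v6 above, v7/v8 below, v10 earlier) now finish by calling amdgpu_gmc_vram_checking() when amdgpu_emu_mode == 1, failing early on emulators if VRAM cannot be written and read back. The real check lives in amdgpu_gmc.c; a write/read-back smoke test of roughly this shape is assumed here:

        /* Assumed shape of the emulator VRAM check (illustrative only). */
        static int example_vram_smoke_test(void __iomem *vram, size_t words)
        {
                size_t i;

                for (i = 0; i < words; i++)
                        writel(0xdeadbeef, vram + i * 4);

                for (i = 0; i < words; i++)
                        if (readl(vram + i * 4) != 0xdeadbeef)
                                return -EIO;

                return 0;
        }
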
index ab8adbff9e2d0e4c5fed21465c506e646686f66b..344d819b4c1b6e9b03d772adefee87ac2ac956ef 100644 (file)
@@ -613,17 +613,14 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 tmp, field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        /* Setup TLB control */
@@ -712,7 +709,6 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
@@ -1111,7 +1107,10 @@ static int gmc_v7_0_hw_init(void *handle)
        if (r)
                return r;
 
-       return r;
+       if (amdgpu_emu_mode == 1)
+               return amdgpu_gmc_vram_checking(adev);
+       else
+               return r;
 }
 
 static int gmc_v7_0_hw_fini(void *handle)
index 054733838292c319e602e01ad31066105c6adf57..ca9841d5669fb9829cf6471b51b918d43835a195 100644 (file)
@@ -837,17 +837,14 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 tmp, field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        /* Setup TLB control */
@@ -953,7 +950,6 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
@@ -1242,7 +1238,10 @@ static int gmc_v8_0_hw_init(void *handle)
        if (r)
                return r;
 
-       return r;
+       if (amdgpu_emu_mode == 1)
+               return amdgpu_gmc_vram_checking(adev);
+       else
+               return r;
 }
 
 static int gmc_v8_0_hw_fini(void *handle)
index 88c1eb9ad068460d2658877a60793c32d2ea4ca4..4595027a8c63c5c8f7ecee283a19a5b6fad3928e 100644 (file)
@@ -1202,7 +1202,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-               adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
+               adev->umc.ras = &umc_v6_1_ras;
                break;
        case IP_VERSION(6, 1, 2):
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1210,15 +1210,16 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-               adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
+               adev->umc.ras = &umc_v6_1_ras;
                break;
        case IP_VERSION(6, 7, 0):
-               adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
+               adev->umc.max_ras_err_cnt_per_query =
+                       UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
                adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
                adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
                if (!adev->gmc.xgmi.connected_to_cpu)
-                       adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
+                       adev->umc.ras = &umc_v6_7_ras;
                if (1 & adev->smuio.funcs->get_die_id(adev))
                        adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
                else
@@ -1227,6 +1228,21 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
        default:
                break;
        }
+
+       if (adev->umc.ras) {
+               amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
+
+               strcpy(adev->umc.ras->ras_block.name, "umc");
+               adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
+
+               /* If the IP doesn't define its own ras_late_init, use the default */
+               if (!adev->umc.ras->ras_block.ras_late_init)
+                       adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+               /* If the IP doesn't define its own ras_fini, use the default */
+               if (!adev->umc.ras->ras_block.ras_fini)
+                       adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini;
+       }
 }
 
 static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
@@ -1248,18 +1264,33 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
 {
        switch (adev->ip_versions[MMHUB_HWIP][0]) {
        case IP_VERSION(9, 4, 0):
-               adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
+               adev->mmhub.ras = &mmhub_v1_0_ras;
                break;
        case IP_VERSION(9, 4, 1):
-               adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
+               adev->mmhub.ras = &mmhub_v9_4_ras;
                break;
        case IP_VERSION(9, 4, 2):
-               adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
+               adev->mmhub.ras = &mmhub_v1_7_ras;
                break;
        default:
                /* mmhub ras is not available */
                break;
        }
+
+       if (adev->mmhub.ras) {
+               amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
+
+               strcpy(adev->mmhub.ras->ras_block.name, "mmhub");
+               adev->mmhub.ras->ras_block.block = AMDGPU_RAS_BLOCK__MMHUB;
+
+               /* If no special ras_late_init function is defined, use the default */
+               if (!adev->mmhub.ras->ras_block.ras_late_init)
+                       adev->mmhub.ras->ras_block.ras_late_init = amdgpu_mmhub_ras_late_init;
+
+               /* If no special ras_fini function is defined, use the default */
+               if (!adev->mmhub.ras->ras_block.ras_fini)
+                       adev->mmhub.ras->ras_block.ras_fini = amdgpu_mmhub_ras_fini;
+       }
 }
 
 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
@@ -1269,7 +1300,8 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 
 static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
 {
-       adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
+       adev->hdp.ras = &hdp_v4_0_ras;
+       amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
 }
 
 static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
@@ -1287,6 +1319,7 @@ static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
 
 static int gmc_v9_0_early_init(void *handle)
 {
+       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
@@ -1316,6 +1349,10 @@ static int gmc_v9_0_early_init(void *handle)
        adev->gmc.private_aperture_end =
                adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
+       r = amdgpu_gmc_ras_early_init(adev);
+       if (r)
+               return r;
+
        return 0;
 }
 
@@ -1342,13 +1379,13 @@ static int gmc_v9_0_late_init(void *handle)
        }
 
        if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-               if (adev->mmhub.ras_funcs &&
-                   adev->mmhub.ras_funcs->reset_ras_error_count)
-                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
+               if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
+                   adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
+                       adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
 
-               if (adev->hdp.ras_funcs &&
-                   adev->hdp.ras_funcs->reset_ras_error_count)
-                       adev->hdp.ras_funcs->reset_ras_error_count(adev);
+               if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
+                   adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
+                       adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
        }
 
        r = amdgpu_gmc_ras_late_init(adev);
@@ -1752,14 +1789,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
-               goto skip_pin_bo;
-
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
-skip_pin_bo:
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
@@ -1776,7 +1806,6 @@ skip_pin_bo:
        DRM_INFO("PTB located at 0x%016llX\n",
                        (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-       adev->gart.ready = true;
        return 0;
 }
 
@@ -1784,7 +1813,7 @@ static int gmc_v9_0_hw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool value;
-       int i;
+       int i, r;
 
        /* The sequence of these two function calls matters.*/
        gmc_v9_0_init_golden_registers(adev);
@@ -1819,7 +1848,14 @@ static int gmc_v9_0_hw_init(void *handle)
        if (adev->umc.funcs && adev->umc.funcs->init_registers)
                adev->umc.funcs->init_registers(adev);
 
-       return gmc_v9_0_gart_enable(adev);
+       r = gmc_v9_0_gart_enable(adev);
+       if (r)
+               return r;
+
+       if (amdgpu_emu_mode == 1)
+               return amdgpu_gmc_vram_checking(adev);
+       else
+               return r;
 }
 
 /**
index eecfb1545c1edec481b4ff00d1cbb6d2a49a9e4b..6b41fcbf4875bd427adb0e3052f1e58862d37e7f 100644 (file)
@@ -150,13 +150,21 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
        WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
 }
 
-const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs = {
-       .ras_late_init = amdgpu_hdp_ras_late_init,
-       .ras_fini = amdgpu_hdp_ras_fini,
+struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = {
        .query_ras_error_count = hdp_v4_0_query_ras_error_count,
        .reset_ras_error_count = hdp_v4_0_reset_ras_error_count,
 };
 
+struct amdgpu_hdp_ras hdp_v4_0_ras = {
+       .ras_block = {
+               .name = "hdp",
+               .block = AMDGPU_RAS_BLOCK__HDP,
+               .hw_ops = &hdp_v4_0_ras_hw_ops,
+               .ras_late_init = amdgpu_hdp_ras_late_init,
+               .ras_fini = amdgpu_hdp_ras_fini,
+       },
+};
+
 const struct amdgpu_hdp_funcs hdp_v4_0_funcs = {
        .flush_hdp = hdp_v4_0_flush_hdp,
        .invalidate_hdp = hdp_v4_0_invalidate_hdp,
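With the hw_ops split out of the lifecycle callbacks, callers such as gmc_v9_0_late_init above dispatch through a three-level NULL-guarded chain (block, hw_ops, individual op). A small sketch of that guarded dispatch, using illustrative stand-in types rather than the driver's:

#include <stdio.h>

struct hw_ops {
	void (*reset_error_count)(void *dev);  /* may be NULL */
};

struct ras_block {
	const char *name;
	const struct hw_ops *hw_ops;           /* may be NULL */
};

/* Guarded dispatch: every level of the chain is optional. */
static void reset_errors(const struct ras_block *blk, void *dev)
{
	if (blk && blk->hw_ops && blk->hw_ops->reset_error_count)
		blk->hw_ops->reset_error_count(dev);
}

static void hdp_reset(void *dev) { puts("hdp: error counters cleared"); }

static const struct hw_ops hdp_ops = { .reset_error_count = hdp_reset };
static const struct ras_block hdp_block = { .name = "hdp", .hw_ops = &hdp_ops };

int main(void)
{
	reset_errors(&hdp_block, NULL);  /* dispatches to the hdp op */
	reset_errors(NULL, NULL);        /* silently skipped */
	return 0;
}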
index dc3a1b81dd62bbccd33adea191bec8da235391b6..c44eee9282abd3ffac7702307a5b16e31bb77a42 100644 (file)
@@ -27,6 +27,6 @@
 #include "soc15_common.h"
 
 extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs;
-extern const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs;
+extern struct amdgpu_hdp_ras hdp_v4_0_ras;
 
 #endif
index 01c242c5abc33d2ee1feee829de0b9b932fad858..41a00851b6c50fb74ad5da3ef1d3934dc7c50efc 100644 (file)
@@ -50,11 +50,16 @@ static int jpeg_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type != CHIP_YELLOW_CARP) {
-               u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+       u32 harvest;
 
+       switch (adev->ip_versions[UVD_HWIP][0]) {
+       case IP_VERSION(3, 1, 1):
+               break;
+       default:
+               harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
                if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
                        return -ENOENT;
+               break;
        }
 
        adev->jpeg.num_jpeg_inst = 1;
index 8f7107d392afba3fa6e164ca796e1c068bcf8ed3..68565262af9cf8170075b7823747beaf5efa276b 100644 (file)
@@ -37,7 +37,7 @@ static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
                                         ras_error_status);
 }
 
-static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev)
+static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
 }
@@ -47,14 +47,35 @@ static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
        amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
 }
 
-const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
-       .ras_late_init = mca_v3_0_mp0_ras_late_init,
-       .ras_fini = mca_v3_0_mp0_ras_fini,
+static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
+                               enum amdgpu_ras_block block, uint32_t sub_block_index)
+{
+       if (!block_obj)
+               return -EINVAL;
+
+       if ((block_obj->block == block) &&
+               (block_obj->sub_block_index == sub_block_index)) {
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
        .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
        .query_ras_error_address = NULL,
-       .ras_block = AMDGPU_RAS_BLOCK__MCA,
-       .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0,
-       .sysfs_name = "mp0_err_count",
+};
+
+struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
+       .ras_block = {
+               .block = AMDGPU_RAS_BLOCK__MCA,
+               .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
+               .name = "mp0",
+               .hw_ops = &mca_v3_0_mp0_hw_ops,
+               .ras_block_match = mca_v3_0_ras_block_match,
+               .ras_late_init = mca_v3_0_mp0_ras_late_init,
+               .ras_fini = mca_v3_0_mp0_ras_fini,
+       },
 };
 
 static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
@@ -65,7 +86,7 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
                                         ras_error_status);
 }
 
-static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev)
+static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
 }
@@ -75,14 +96,21 @@ static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
        amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
 }
 
-const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
-       .ras_late_init = mca_v3_0_mp1_ras_late_init,
-       .ras_fini = mca_v3_0_mp1_ras_fini,
+const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
        .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
        .query_ras_error_address = NULL,
-       .ras_block = AMDGPU_RAS_BLOCK__MCA,
-       .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1,
-       .sysfs_name = "mp1_err_count",
+};
+
+struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
+       .ras_block = {
+               .block = AMDGPU_RAS_BLOCK__MCA,
+               .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
+               .name = "mp1",
+               .hw_ops = &mca_v3_0_mp1_hw_ops,
+               .ras_block_match = mca_v3_0_ras_block_match,
+               .ras_late_init = mca_v3_0_mp1_ras_late_init,
+               .ras_fini = mca_v3_0_mp1_ras_fini,
+       },
 };
 
 static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
@@ -93,7 +121,7 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
                                         ras_error_status);
 }
 
-static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev)
+static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev, void *ras_info)
 {
        return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
 }
@@ -103,14 +131,21 @@ static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
        amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
 }
 
-const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
-       .ras_late_init = mca_v3_0_mpio_ras_late_init,
-       .ras_fini = mca_v3_0_mpio_ras_fini,
+const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
        .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
        .query_ras_error_address = NULL,
-       .ras_block = AMDGPU_RAS_BLOCK__MCA,
-       .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO,
-       .sysfs_name = "mpio_err_count",
+};
+
+struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
+       .ras_block = {
+               .block = AMDGPU_RAS_BLOCK__MCA,
+               .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
+               .name = "mpio",
+               .hw_ops = &mca_v3_0_mpio_hw_ops,
+               .ras_block_match = mca_v3_0_ras_block_match,
+               .ras_late_init = mca_v3_0_mpio_ras_late_init,
+               .ras_fini = mca_v3_0_mpio_ras_fini,
+       },
 };
 
 
@@ -118,9 +153,12 @@ static void mca_v3_0_init(struct amdgpu_device *adev)
 {
        struct amdgpu_mca *mca = &adev->mca;
 
-       mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs;
-       mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs;
-       mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs;
+       mca->mp0.ras = &mca_v3_0_mp0_ras;
+       mca->mp1.ras = &mca_v3_0_mp1_ras;
+       mca->mpio.ras = &mca_v3_0_mpio_ras;
+       amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
+       amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
+       amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
 }
 
 const struct amdgpu_mca_funcs mca_v3_0_funcs = {
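MCA is the one block here that overrides ras_block_match, since its three sub-blocks all share AMDGPU_RAS_BLOCK__MCA and differ only by sub_block_index. A sketch of how a lookup over registered blocks could use such an override — the names are hypothetical, and the default match comparing only the block id is an assumption for illustration:

#include <stdio.h>

enum block_id { BLOCK_MCA = 0 };

struct ras_block {
	enum block_id block;
	unsigned int sub_block_index;
	/* optional override; assumed default compares block id only */
	int (*match)(const struct ras_block *b, enum block_id id, unsigned int sub);
};

static int default_match(const struct ras_block *b, enum block_id id, unsigned int sub)
{
	return b->block == id ? 0 : -1;
}

static int mca_match(const struct ras_block *b, enum block_id id, unsigned int sub)
{
	return (b->block == id && b->sub_block_index == sub) ? 0 : -1;
}

static const struct ras_block *find_block(const struct ras_block *tbl, int n,
					  enum block_id id, unsigned int sub)
{
	for (int i = 0; i < n; i++) {
		int (*m)(const struct ras_block *, enum block_id, unsigned int) =
			tbl[i].match ? tbl[i].match : default_match;
		if (!m(&tbl[i], id, sub))
			return &tbl[i];
	}
	return NULL;
}

int main(void)
{
	struct ras_block blocks[] = {
		{ BLOCK_MCA, 0, mca_match },  /* mp0 */
		{ BLOCK_MCA, 1, mca_match },  /* mp1 */
	};
	const struct ras_block *hit = find_block(blocks, 2, BLOCK_MCA, 1);

	if (hit)
		printf("found sub-block %u\n", hit->sub_block_index);
	return 0;
}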
index 1da2ec692057ee98445620a24b1c9953604fbe2c..4c9f0c0f31168a61d74dc0f5222142e82ed6c60b 100644 (file)
@@ -774,13 +774,17 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
-       .ras_fini = amdgpu_mmhub_ras_fini,
+struct amdgpu_ras_block_hw_ops mmhub_v1_0_ras_hw_ops = {
        .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
 };
 
+struct amdgpu_mmhub_ras mmhub_v1_0_ras = {
+       .ras_block = {
+               .hw_ops = &mmhub_v1_0_ras_hw_ops,
+       },
+};
+
 const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
        .get_fb_location = mmhub_v1_0_get_fb_location,
        .init = mmhub_v1_0_init,
index 4661b094e00784d907f0efa22097fdb1a6452227..dae7ca48bd8b46d01b987fd11cd1f9534edce5c5 100644 (file)
@@ -24,6 +24,6 @@
 #define __MMHUB_V1_0_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
-extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v1_0_ras;
 
 #endif
index f5f7181f9af5fd1302365a4522b573576b701b13..3b901f941627ef2d0163a99efaa8d1b7443aec13 100644 (file)
@@ -1321,15 +1321,19 @@ static void mmhub_v1_7_reset_ras_error_status(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
-       .ras_fini = amdgpu_mmhub_ras_fini,
+struct amdgpu_ras_block_hw_ops mmhub_v1_7_ras_hw_ops = {
        .query_ras_error_count = mmhub_v1_7_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
        .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
        .reset_ras_error_status = mmhub_v1_7_reset_ras_error_status,
 };
 
+struct amdgpu_mmhub_ras mmhub_v1_7_ras = {
+       .ras_block = {
+               .hw_ops = &mmhub_v1_7_ras_hw_ops,
+       },
+};
+
 const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
        .get_fb_location = mmhub_v1_7_get_fb_location,
        .init = mmhub_v1_7_init,
index a7f9dfc2469725bb6231e8855a5ef9a826cf64a9..629f49052137127aebe90e3827d98f87efb3ff96 100644 (file)
@@ -24,6 +24,6 @@
 #define __MMHUB_V1_7_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
-extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v1_7_ras;
 
 #endif
index ff49eeaf78824534c5593614ed1e93cafe8c4fa3..619106f7d23de9c42e36867759d03318ba5fe592 100644 (file)
@@ -1655,14 +1655,18 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
-       .ras_fini = amdgpu_mmhub_ras_fini,
+const struct amdgpu_ras_block_hw_ops mmhub_v9_4_ras_hw_ops = {
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
        .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
        .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
 };
 
+struct amdgpu_mmhub_ras mmhub_v9_4_ras = {
+       .ras_block = {
+               .hw_ops = &mmhub_v9_4_ras_hw_ops,
+       },
+};
+
 const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .get_fb_location = mmhub_v9_4_get_fb_location,
        .init = mmhub_v9_4_init,
index 90436efa92ef28c5a488b9bea0a32024a48215d0..a48329d95f71f8a894c0127f2a064b64fbe64709 100644 (file)
@@ -24,6 +24,6 @@
 #define __MMHUB_V9_4_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
-extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs;
+extern struct amdgpu_mmhub_ras mmhub_v9_4_ras;
 
 #endif
index aef9d059ae52585bc84973c01060b319cf29d84a..a642c04cf17dfc54a70ba6755aa8c53984e38178 100644 (file)
@@ -544,7 +544,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
 {
        int r;
 
-       /* trigger gpu-reset by hypervisor only if TDR disbaled */
+       /* trigger gpu-reset by hypervisor only if TDR disabled */
        if (!amdgpu_gpu_recovery) {
                /* see what event we get */
                r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
index 3444332ea1104e5334a0c96438c9f0f409b26f60..44f17bbfeb6a5263fc7ad38533def27f5fee9683 100644 (file)
@@ -59,10 +59,15 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
 {
        u32 tmp;
 
-       if (adev->asic_type == CHIP_YELLOW_CARP)
+       switch (adev->ip_versions[NBIO_HWIP][0]) {
+       case IP_VERSION(7, 2, 1):
+       case IP_VERSION(7, 5, 0):
                tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC);
-       else
+               break;
+       default:
                tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+               break;
+       }
 
        tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
        tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@@ -72,20 +77,25 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev)
 
 static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable)
 {
-       if (enable)
-               if (adev->asic_type == CHIP_YELLOW_CARP)
+       switch (adev->ip_versions[NBIO_HWIP][0]) {
+       case IP_VERSION(7, 2, 1):
+       case IP_VERSION(7, 5, 0):
+               if (enable)
                        WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC,
                                BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
                                BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
                else
+                       WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0);
+               break;
+       default:
+               if (enable)
                        WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN,
                                BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK |
                                BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK);
-       else
-               if (adev->asic_type == CHIP_YELLOW_CARP)
-                       WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0);
                else
                        WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0);
+               break;
+       }
 }
 
 static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev)
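The conversion from asic_type checks to switches on ip_versions lets one case list cover every ASIC that shares an NBIO revision, instead of growing an if/else per chip name. A toy illustration of the pattern — the IP_VERSION packing here is a guess for demonstration, not necessarily the driver's actual encoding:

#include <stdio.h>

/* Hypothetical major/minor/rev packing so one switch key covers an IP rev. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

static const char *strap0_reg_for(unsigned int nbio_ver)
{
	switch (nbio_ver) {
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 5, 0):
		return "RCC_STRAP0_..._STRAP0_YC";  /* YC-style register layout */
	default:
		return "RCC_STRAP0_..._STRAP0";     /* legacy layout */
	}
}

int main(void)
{
	printf("%s\n", strap0_reg_for(IP_VERSION(7, 5, 0)));
	printf("%s\n", strap0_reg_for(IP_VERSION(7, 4, 0)));
	return 0;
}

Adding a future revision that reuses the YC layout then means adding one case label rather than another chip-name comparison.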
@@ -250,7 +260,9 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
 {
        uint32_t def, data;
 
-       if (adev->asic_type == CHIP_YELLOW_CARP) {
+       switch (adev->ip_versions[NBIO_HWIP][0]) {
+       case IP_VERSION(7, 2, 1):
+       case IP_VERSION(7, 5, 0):
                def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
                        data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
@@ -260,8 +272,8 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
 
-               data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1));
-               def = data;
+               def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0,
+                       regBIF1_PCIE_TX_POWER_CTRL_1));
                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
                        data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK |
                                BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK);
@@ -272,7 +284,8 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1),
                                data);
-       } else {
+               break;
+       default:
                def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2));
                if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
                        data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
@@ -285,6 +298,7 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
 
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data);
+               break;
        }
 }
 
@@ -352,7 +366,9 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = {
 static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
-       if (adev->asic_type == CHIP_YELLOW_CARP) {
+       switch (adev->ip_versions[NBIO_HWIP][0]) {
+       case IP_VERSION(7, 2, 1):
+       case IP_VERSION(7, 5, 0):
                def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3));
                data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3,
                        CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
@@ -361,7 +377,8 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
-       } else {
+               break;
+       default:
                def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
                data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
                        CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
@@ -370,6 +387,7 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
 
                if (def != data)
                        WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
+               break;
        }
 
        if (amdgpu_sriov_vf(adev))
index dc5e93756fea4fd8d7a193dd75774604b49cd570..39974b449341b9ffb62018cccb3220a4bbb258ae 100644 (file)
@@ -658,16 +658,25 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
                       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
 }
 
-const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
+const struct amdgpu_ras_block_hw_ops nbio_v7_4_ras_hw_ops = {
+       .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+};
+
+struct amdgpu_nbio_ras nbio_v7_4_ras = {
+       .ras_block = {
+               .name = "pcie_bif",
+               .block = AMDGPU_RAS_BLOCK__PCIE_BIF,
+               .hw_ops = &nbio_v7_4_ras_hw_ops,
+               .ras_late_init = amdgpu_nbio_ras_late_init,
+               .ras_fini = amdgpu_nbio_ras_fini,
+       },
        .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
        .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
        .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
        .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
-       .query_ras_error_count = nbio_v7_4_query_ras_error_count,
-       .ras_late_init = amdgpu_nbio_ras_late_init,
-       .ras_fini = amdgpu_nbio_ras_fini,
 };
 
+
 static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
 {
        uint32_t def, data;
index cc5692db6f9808c3eeddf1546c367fcaa240afb7..7490022d79d4f799e8f26020a42db234c9989200 100644 (file)
@@ -29,6 +29,6 @@
 extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
 extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald;
 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
-extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
+extern struct amdgpu_nbio_ras nbio_v7_4_ras;
 
 #endif
index 2ec1ffb36b1fc54db2b840b2498a6963d1a36a69..5e9ab31fee6b398b3723d1f2ae57e493f39179dc 100644 (file)
@@ -258,21 +258,6 @@ static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
        return amdgpu_device_indirect_rreg64(adev, address, data, reg);
 }
 
-static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
-{
-       unsigned long flags, address, data;
-       u32 r;
-       address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
-       data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
-
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg * 4);
-       (void)RREG32(address);
-       r = RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
-}
-
 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
        unsigned long address, data;
@@ -283,21 +268,6 @@ static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
        amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
 }
 
-static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
-       unsigned long flags, address, data;
-
-       address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
-       data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
-
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg * 4);
-       (void)RREG32(address);
-       WREG32(data, v);
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-}
-
 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
 {
        unsigned long flags, address, data;
@@ -360,38 +330,6 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
        return false;
 }
 
-static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
-                                 u8 *bios, u32 length_bytes)
-{
-       u32 *dw_ptr;
-       u32 i, length_dw;
-       u32 rom_index_offset, rom_data_offset;
-
-       if (bios == NULL)
-               return false;
-       if (length_bytes == 0)
-               return false;
-       /* APU vbios image is part of sbios image */
-       if (adev->flags & AMD_IS_APU)
-               return false;
-
-       dw_ptr = (u32 *)bios;
-       length_dw = ALIGN(length_bytes, 4) / 4;
-
-       rom_index_offset =
-               adev->smuio.funcs->get_rom_index_offset(adev);
-       rom_data_offset =
-               adev->smuio.funcs->get_rom_data_offset(adev);
-
-       /* set rom index to 0 */
-       WREG32(rom_index_offset, 0);
-       /* read out the rom data */
-       for (i = 0; i < length_dw; i++)
-               dw_ptr[i] = RREG32(rom_data_offset);
-
-       return true;
-}
-
 static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
@@ -708,7 +646,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
 static const struct amdgpu_asic_funcs nv_asic_funcs =
 {
        .read_disabled_bios = &nv_read_disabled_bios,
-       .read_bios_from_rom = &nv_read_bios_from_rom,
+       .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
        .read_register = &nv_read_register,
        .reset = &nv_asic_reset,
        .reset_method = &nv_asic_reset_method,
@@ -742,8 +680,8 @@ static int nv_common_early_init(void *handle)
        adev->pcie_wreg = &nv_pcie_wreg;
        adev->pcie_rreg64 = &nv_pcie_rreg64;
        adev->pcie_wreg64 = &nv_pcie_wreg64;
-       adev->pciep_rreg = &nv_pcie_port_rreg;
-       adev->pciep_wreg = &nv_pcie_port_wreg;
+       adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
+       adev->pciep_wreg = amdgpu_device_pcie_port_wreg;
 
        /* TODO: will add them during VCN v2 implementation */
        adev->uvd_ctx_rreg = NULL;
@@ -964,6 +902,7 @@ static int nv_common_early_init(void *handle)
                        adev->external_rev_id = adev->rev_id + 0x01;
                break;
        case IP_VERSION(10, 1, 3):
+       case IP_VERSION(10, 1, 4):
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x82;
index dd0dce25490137f1fee57218dafd4c4c074b032f..1f276ddd26e9c2ac25ed28d8eb1d0ad775510ab3 100644 (file)
@@ -258,6 +258,7 @@ enum psp_gfx_fw_type {
        GFX_FW_TYPE_SDMA6                           = 56,   /* SDMA6                    MI      */
        GFX_FW_TYPE_SDMA7                           = 57,   /* SDMA7                    MI      */
        GFX_FW_TYPE_VCN1                            = 58,   /* VCN1                     MI      */
+       GFX_FW_TYPE_CAP                             = 62,   /* CAP_FW                           */
        GFX_FW_TYPE_REG_LIST                        = 67,   /* REG_LIST                 MI      */
        GFX_FW_TYPE_MAX
 };
index d0e76b36d4ab1a5565829592606c957e45be7d20..9518b4394a6e7863a5e9b902ec076870b018b946 100644 (file)
@@ -53,11 +53,13 @@ MODULE_FIRMWARE("amdgpu/navi14_ta.bin");
 MODULE_FIRMWARE("amdgpu/navi12_sos.bin");
 MODULE_FIRMWARE("amdgpu/navi12_asd.bin");
 MODULE_FIRMWARE("amdgpu/navi12_ta.bin");
+MODULE_FIRMWARE("amdgpu/navi12_cap.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_sos.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_asd.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
 MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
+MODULE_FIRMWARE("amdgpu/sienna_cichlid_cap.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
 MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_asd.bin");
@@ -177,8 +179,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                err = psp_init_asd_microcode(psp, chip_name);
                if (err)
                        return err;
-               if (amdgpu_sriov_vf(adev))
-                       break;
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
                err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
                if (err) {
index 1ed357cb0f49014a18afa78d4b6ad42abe8295bb..01f3bcc62a6c72a927abdfdf621252119803fc95 100644 (file)
@@ -44,6 +44,7 @@
 
 MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
 MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
 MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
 MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
 
index e8e4749e9c7972491325f5dae960f6495d774555..02115d63b07132e83c63d5bbdb740e1c4ab63210 100644 (file)
@@ -1892,13 +1892,13 @@ static int sdma_v4_0_late_init(void *handle)
        sdma_v4_0_setup_ulv(adev);
 
        if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
-               if (adev->sdma.funcs &&
-                   adev->sdma.funcs->reset_ras_error_count)
-                       adev->sdma.funcs->reset_ras_error_count(adev);
+               if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+                   adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
+                       adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
        }
 
-       if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
-               return adev->sdma.funcs->ras_late_init(adev, &ih_info);
+       if (adev->sdma.ras && adev->sdma.ras->ras_block.ras_late_init)
+               return adev->sdma.ras->ras_block.ras_late_init(adev, &ih_info);
        else
                return 0;
 }
@@ -2001,8 +2001,9 @@ static int sdma_v4_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       if (adev->sdma.funcs && adev->sdma.funcs->ras_fini)
-               adev->sdma.funcs->ras_fini(adev);
+       if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+               adev->sdma.ras->ras_block.ras_fini)
+               adev->sdma.ras->ras_block.ras_fini(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
@@ -2057,6 +2058,10 @@ static int sdma_v4_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* SMU saves SDMA state for us */
+       if (adev->in_s0ix)
+               return 0;
+
        return sdma_v4_0_hw_fini(adev);
 }
 
@@ -2064,6 +2069,10 @@ static int sdma_v4_0_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* SMU restores SDMA state for us */
+       if (adev->in_s0ix)
+               return 0;
+
        return sdma_v4_0_hw_init(adev);
 }
 
@@ -2740,7 +2749,7 @@ static void sdma_v4_0_get_ras_error_count(uint32_t value,
        }
 }
 
-static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
+static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
                        uint32_t instance, void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
@@ -2762,6 +2771,18 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
        return 0;
 };
 
+static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+       int i = 0;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
+                       dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
+                       return;
+               }
+       }
+}
+
 static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
 {
        int i;
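The per-instance query helpers are renamed *_by_instance and wrapped so the generic hw_ops entry point takes only (adev, ras_error_status) and iterates the instances internally. A compact sketch of that aggregation shape, with stand-in types and counts:

#include <stdio.h>

#define NUM_INSTANCES 4

struct err_data { unsigned long ce_count, ue_count; };

/* Per-instance query, as the old per-IP interface exposed it. */
static int query_one(int instance, struct err_data *d)
{
	if (instance >= NUM_INSTANCES)
		return -1;
	d->ce_count += 1;  /* stand-in for reading this instance's counters */
	return 0;
}

/* The new generic entry point: loop over every instance internally. */
static void query_all(struct err_data *d)
{
	for (int i = 0; i < NUM_INSTANCES; i++) {
		if (query_one(i, d)) {
			fprintf(stderr, "query failed in instance %d\n", i);
			return;
		}
	}
}

int main(void)
{
	struct err_data d = { 0 };

	query_all(&d);
	printf("ce=%lu ue=%lu\n", d.ce_count, d.ue_count);
	return 0;
}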
@@ -2773,26 +2794,45 @@ static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
-static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
-       .ras_late_init = amdgpu_sdma_ras_late_init,
-       .ras_fini = amdgpu_sdma_ras_fini,
+const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops = {
        .query_ras_error_count = sdma_v4_0_query_ras_error_count,
        .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
 };
 
+static struct amdgpu_sdma_ras sdma_v4_0_ras = {
+       .ras_block = {
+               .hw_ops = &sdma_v4_0_ras_hw_ops,
+       },
+};
+
 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
 {
        switch (adev->ip_versions[SDMA0_HWIP][0]) {
        case IP_VERSION(4, 2, 0):
        case IP_VERSION(4, 2, 2):
-               adev->sdma.funcs = &sdma_v4_0_ras_funcs;
+               adev->sdma.ras = &sdma_v4_0_ras;
                break;
        case IP_VERSION(4, 4, 0):
-               adev->sdma.funcs = &sdma_v4_4_ras_funcs;
+               adev->sdma.ras = &sdma_v4_4_ras;
                break;
        default:
                break;
        }
+
+       if (adev->sdma.ras) {
+               amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
+
+               strcpy(adev->sdma.ras->ras_block.name, "sdma");
+               adev->sdma.ras->ras_block.block = AMDGPU_RAS_BLOCK__SDMA;
+
+               /* If no special ras_late_init function is defined, use the default */
+               if (!adev->sdma.ras->ras_block.ras_late_init)
+                       adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
+
+               /* If no special ras_fini function is defined, use the default */
+               if (!adev->sdma.ras->ras_block.ras_fini)
+                       adev->sdma.ras->ras_block.ras_fini = amdgpu_sdma_ras_fini;
+       }
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
index bf95007f08432cb96fce5a8b8b04381de6d97a69..6f9895cdddb10e41b010c128da121376fb8ec504 100644 (file)
@@ -188,7 +188,7 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
        }
 }
 
-static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
+static int sdma_v4_4_query_ras_error_count_by_instance(struct amdgpu_device *adev,
                                           uint32_t instance,
                                           void *ras_error_status)
 {
@@ -245,9 +245,26 @@ static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = {
-       .ras_late_init = amdgpu_sdma_ras_late_init,
-       .ras_fini = amdgpu_sdma_ras_fini,
+static void sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+       int i = 0;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
+                       dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
+                       return;
+               }
+       }
+
+}
+
+const struct amdgpu_ras_block_hw_ops sdma_v4_4_ras_hw_ops = {
        .query_ras_error_count = sdma_v4_4_query_ras_error_count,
        .reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
 };
+
+struct amdgpu_sdma_ras sdma_v4_4_ras = {
+       .ras_block = {
+               .hw_ops = &sdma_v4_4_ras_hw_ops,
+       },
+};
index 74a6e5b5e949ae997203768474989475a78e53c3..a9f0c68359e0c1d7e05d36ad8dc3859d3e5a0e3d 100644 (file)
@@ -23,6 +23,6 @@
 #ifndef __SDMA_V4_4_H__
 #define __SDMA_V4_4_H__
 
-extern const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs;
+extern struct amdgpu_sdma_ras sdma_v4_4_ras;
 
 #endif
index 81e033549dda3bb3f3a1d6c681f480d86f113c12..45e10d5028c579ec3d6a7061588f54c018a51c6f 100644 (file)
@@ -264,7 +264,8 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
                chip_name = "navi12";
                break;
        case IP_VERSION(5, 0, 1):
-               if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+               if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2 ||
+                   adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 4))
                        chip_name = "cyan_skillfish2";
                else
                        chip_name = "cyan_skillfish";
index 73ffa8fde3df3f3fbc2d1abf79651c87c6b68329..dd2d66090d23749c318eab2fe7a93c76cb8be205 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "smu_v11_0_i2c.h"
 #include "amdgpu.h"
+#include "amdgpu_dpm.h"
 #include "soc15_common.h"
 #include <drm/drm_fixed.h>
 #include <drm/drm_drv.h>
 
 #define I2C_X_RESTART         BIT(31)
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
 static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        uint32_t reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT);
 
        reg = REG_SET_FIELD(reg, SMUIO_PWRMGT, i2c_clk_gate_en, en ? 1 : 0);
@@ -75,7 +75,8 @@ static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
 
 static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, enable ? 1 : 0);
 
@@ -100,7 +101,8 @@ static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable)
 
 static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        /* do */
        {
                RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CLR_INTR);
@@ -110,7 +112,8 @@ static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control)
 
 static void smu_v11_0_i2c_configure(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        uint32_t reg = 0;
 
        reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_SLAVE_DISABLE, 1);
@@ -131,7 +134,8 @@ static void smu_v11_0_i2c_configure(struct i2c_adapter *control)
 
 static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        /*
         * Standard mode speed, These values are taken from SMUIO MAS,
@@ -154,7 +158,8 @@ static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control)
 
 static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        /* The IC_TAR::IC_TAR field is 10-bits wide.
         * It takes a 7-bit or 10-bit addresses as an address,
@@ -165,7 +170,8 @@ static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address)
 
 static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        uint32_t ret = I2C_OK;
        uint32_t reg, reg_c_tx_abrt_source;
 
@@ -216,7 +222,8 @@ static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control)
 
 static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        uint32_t ret = I2C_OK;
        uint32_t reg_ic_status, reg_c_tx_abrt_source;
 
@@ -262,7 +269,8 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control,
                                       u16 address, u8 *data,
                                       u32 numbytes, u32 i2c_flag)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        u32 bytes_sent, reg, ret = I2C_OK;
        unsigned long  timeout_counter;
 
@@ -360,7 +368,8 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
                                      u16 address, u8 *data,
                                      u32 numbytes, u32 i2c_flag)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        uint32_t bytes_received, ret = I2C_OK;
 
        bytes_received = 0;
@@ -431,7 +440,8 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control,
 
 static void smu_v11_0_i2c_abort(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        uint32_t reg = 0;
 
        /* Enable I2C engine; */
@@ -447,7 +457,8 @@ static void smu_v11_0_i2c_abort(struct i2c_adapter *control)
 
 static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        const uint32_t IDLE_TIMEOUT = 1024;
        uint32_t timeout_count = 0;
@@ -508,7 +519,8 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control)
 
 static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
        u32 status, enable, en_stat;
        int res;
 
@@ -543,7 +555,8 @@ static void smu_v11_0_i2c_fini(struct i2c_adapter *control)
 
 static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        /* Send  PPSMC_MSG_RequestI2CBus */
        if (!amdgpu_dpm_smu_i2c_bus_access(adev, true))
@@ -554,7 +567,8 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control)
 
 static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        /* Send  PPSMC_MSG_ReleaseI2CBus */
        if (!amdgpu_dpm_smu_i2c_bus_access(adev, false))
@@ -587,16 +601,17 @@ static uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control,
 
        if (ret != I2C_OK)
                DRM_ERROR("WriteI2CData() - I2C error occurred :%x", ret);
-       
+
        return ret;
 
 }
 
 static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(i2c);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
-       mutex_lock(&adev->pm.smu_i2c_mutex);
+       mutex_lock(&smu_i2c->mutex);
        if (!smu_v11_0_i2c_bus_lock(i2c))
                DRM_ERROR("Failed to lock the bus from SMU");
        else
@@ -611,13 +626,14 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
 
 static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(i2c);
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c);
+       struct amdgpu_device *adev = smu_i2c->adev;
 
        if (!smu_v11_0_i2c_bus_unlock(i2c))
                DRM_ERROR("Failed to unlock the bus from SMU");
        else
                adev->pm.bus_locked = false;
-       mutex_unlock(&adev->pm.smu_i2c_mutex);
+       mutex_unlock(&smu_i2c->mutex);
 }
 
 static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = {
@@ -706,19 +722,26 @@ static const struct i2c_adapter_quirks smu_v11_0_i2c_control_quirks = {
        .flags = I2C_AQ_NO_ZERO_LEN,
 };
 
-int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
+int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
+       struct i2c_adapter *control = &smu_i2c->adapter;
        int res;
 
-       mutex_init(&adev->pm.smu_i2c_mutex);
+       smu_i2c->adev = adev;
+       smu_i2c->port = 0;
+       mutex_init(&smu_i2c->mutex);
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_HWMON;
        control->dev.parent = &adev->pdev->dev;
        control->algo = &smu_v11_0_i2c_algo;
-       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
        control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
        control->quirks = &smu_v11_0_i2c_control_quirks;
+       i2c_set_adapdata(control, smu_i2c);
+
+       adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+       adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
 
        res = i2c_add_adapter(control);
        if (res)
@@ -727,9 +750,13 @@ int smu_v11_0_i2c_control_init(struct i2c_adapter *control)
        return res;
 }
 
-void smu_v11_0_i2c_control_fini(struct i2c_adapter *control)
+void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
 {
+       struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
+
        i2c_del_adapter(control);
+       adev->pm.ras_eeprom_i2c_bus = NULL;
+       adev->pm.fru_eeprom_i2c_bus = NULL;
 }
 
 /*
index 44467c05f6421da65d0b4d7c94a9e3de20f9a3da..96ad14288a0c34a5b51064fd500899f68712ef70 100644 (file)
@@ -26,9 +26,9 @@
 
 #include <linux/types.h>
 
-struct i2c_adapter;
+struct amdgpu_device;
 
-int smu_v11_0_i2c_control_init(struct i2c_adapter *control);
-void smu_v11_0_i2c_control_fini(struct i2c_adapter *control);
+int smu_v11_0_i2c_control_init(struct amdgpu_device *adev);
+void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev);
 
 #endif
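The i2c rework replaces the container_of() back-pointer — which could only ever resolve a single adapter embedded in the device — with per-bus context attached via i2c_set_adapdata(), so each SMU bus carries its own device pointer, port number, and lock. A user-space sketch of the container pattern, with simplified stand-ins for the i2c core helpers:

#include <stdio.h>

struct adapter { void *priv; };                 /* stand-in for i2c_adapter */

static void set_adapdata(struct adapter *a, void *p) { a->priv = p; }
static void *get_adapdata(const struct adapter *a)   { return a->priv; }

struct device { const char *name; };

struct smu_i2c_bus {                            /* one context per bus */
	struct adapter adapter;
	struct device *dev;
	int port;
};

static void handler(struct adapter *control)
{
	struct smu_i2c_bus *bus = get_adapdata(control);

	printf("%s port %d\n", bus->dev->name, bus->port);
}

int main(void)
{
	struct device dev = { "gpu0" };
	struct smu_i2c_bus buses[2] = {
		{ .dev = &dev, .port = 0 },
		{ .dev = &dev, .port = 1 },
	};

	for (int i = 0; i < 2; i++) {
		set_adapdata(&buses[i].adapter, &buses[i]);
		handler(&buses[i].adapter);
	}
	return 0;
}

Because each adapter resolves to its own context rather than to the one struct it happens to be embedded in, enabling a second SMU i2c bus needs no change to any of the handlers.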
index 0fc1747e4a70f3f0ea0e1e5f3577a133c4b4dcc2..a0235f75dbcb9c69732e7fb156592eaa94e542fb 100644 (file)
@@ -375,39 +375,6 @@ static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
        return false;
 }
 
-static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
-                                    u8 *bios, u32 length_bytes)
-{
-       u32 *dw_ptr;
-       u32 i, length_dw;
-       uint32_t rom_index_offset;
-       uint32_t rom_data_offset;
-
-       if (bios == NULL)
-               return false;
-       if (length_bytes == 0)
-               return false;
-       /* APU vbios image is part of sbios image */
-       if (adev->flags & AMD_IS_APU)
-               return false;
-
-       dw_ptr = (u32 *)bios;
-       length_dw = ALIGN(length_bytes, 4) / 4;
-
-       rom_index_offset =
-               adev->smuio.funcs->get_rom_index_offset(adev);
-       rom_data_offset =
-               adev->smuio.funcs->get_rom_data_offset(adev);
-
-       /* set rom index to 0 */
-       WREG32(rom_index_offset, 0);
-       /* read out the rom data */
-       for (i = 0; i < length_dw; i++)
-               dw_ptr[i] = RREG32(rom_data_offset);
-
-       return true;
-}
-
 static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
@@ -925,7 +892,7 @@ static void soc15_pre_asic_init(struct amdgpu_device *adev)
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
 {
        .read_disabled_bios = &soc15_read_disabled_bios,
-       .read_bios_from_rom = &soc15_read_bios_from_rom,
+       .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
        .read_register = &soc15_read_register,
        .reset = &soc15_asic_reset,
        .reset_method = &soc15_asic_reset_method,
@@ -947,7 +914,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
 static const struct amdgpu_asic_funcs vega20_asic_funcs =
 {
        .read_disabled_bios = &soc15_read_disabled_bios,
-       .read_bios_from_rom = &soc15_read_bios_from_rom,
+       .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
        .read_register = &soc15_read_register,
        .reset = &soc15_asic_reset,
        .reset_method = &soc15_asic_reset_method,
@@ -1224,9 +1191,8 @@ static int soc15_common_late_init(void *handle)
        if (amdgpu_sriov_vf(adev))
                xgpu_ai_mailbox_get_irq(adev);
 
-       if (adev->nbio.ras_funcs &&
-           adev->nbio.ras_funcs->ras_late_init)
-               r = adev->nbio.ras_funcs->ras_late_init(adev);
+       if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_late_init)
+               r = adev->nbio.ras->ras_block.ras_late_init(adev, NULL);
 
        return r;
 }
@@ -1249,9 +1215,8 @@ static int soc15_common_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->nbio.ras_funcs &&
-           adev->nbio.ras_funcs->ras_fini)
-               adev->nbio.ras_funcs->ras_fini(adev);
+       if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_fini)
+               adev->nbio.ras->ras_block.ras_fini(adev);
 
        if (adev->df.funcs &&
            adev->df.funcs->sw_fini)
@@ -1318,11 +1283,11 @@ static int soc15_common_hw_fini(void *handle)
 
        if (adev->nbio.ras_if &&
            amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
-               if (adev->nbio.ras_funcs &&
-                   adev->nbio.ras_funcs->init_ras_controller_interrupt)
+               if (adev->nbio.ras &&
+                   adev->nbio.ras->init_ras_controller_interrupt)
                        amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
-               if (adev->nbio.ras_funcs &&
-                   adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
+               if (adev->nbio.ras &&
+                   adev->nbio.ras->init_ras_err_event_athub_interrupt)
                        amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
        }
 
index 473767e036767922aafd70f3bc37e4afbfc77a14..acce8c2e0328be4d0a9d122654b259e7d1dd95e1 100644 (file)
 #define SOC15_REG_OFFSET(ip, inst, reg)        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
 
 #define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
-       ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_wreg) ? \
-        adev->gfx.rlc.funcs->sriov_wreg(adev, reg, value, flag, hwip) : \
+       ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
+        amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \
         WREG32(reg, value))
 
 #define __RREG32_SOC15_RLC__(reg, flag, hwip) \
-       ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_rreg) ? \
-        adev->gfx.rlc.funcs->sriov_rreg(adev, reg, flag, hwip) : \
+       ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \
+        amdgpu_sriov_rreg(adev, reg, flag, hwip) : \
         RREG32(reg))
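The RLC macros above now gate on a capability flag and route through the common amdgpu_sriov_rreg/wreg helpers instead of per-ASIC function pointers. A toy model of that conditional dispatch — all names here are stand-ins:

#include <stdio.h>

static int is_vf = 1;             /* stand-in for amdgpu_sriov_vf() */
static int rlcg_supported = 1;    /* stand-in for rlcg_reg_access_supported */

static unsigned int mmio_read(unsigned int reg) { return reg; }
static unsigned int rlcg_read(unsigned int reg) { return reg | 0x80000000u; }

/* Take the RLCG path only when running as a VF on hardware that
 * advertises RLCG register access; otherwise plain MMIO. */
#define RREG32_RLC(reg) \
	((is_vf && rlcg_supported) ? rlcg_read(reg) : mmio_read(reg))

int main(void)
{
	printf("0x%08x\n", RREG32_RLC(0x1234));  /* RLCG path */
	rlcg_supported = 0;
	printf("0x%08x\n", RREG32_RLC(0x1234));  /* MMIO fallback */
	return 0;
}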
 
 #define WREG32_FIELD15(ip, idx, reg, field, val)       \
index 5093826a43d101708483f11bfe6700eadefbd737..509d8a1945ebb67d49303ecd10a28a65a4bdb764 100644 (file)
@@ -64,7 +64,8 @@ enum ta_ras_status {
        TA_RAS_STATUS__ERROR_PCS_STATE_ERROR            = 0xA016,
        TA_RAS_STATUS__ERROR_PCS_STATE_HANG             = 0xA017,
        TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN          = 0xA018,
-       TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ      = 0xA019
+       TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ      = 0xA019,
+       TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED          = 0xA01A
 };
 
 enum ta_ras_block {
index 20b44983ac945fbbca71b80b97bbb1f551d61bc3..939cb203f7ad53de64d2b1245d6d2873971ccdf7 100644 (file)
@@ -300,7 +300,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 {
        uint32_t lsb, mc_umc_status_addr;
        uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
-       struct eeprom_table_record *err_rec;
        uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
 
        if (adev->asic_type == CHIP_ARCTURUS) {
@@ -328,8 +327,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
                return;
        }
 
-       err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
        /* calculate error address if ue/ce error is detected */
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -348,18 +345,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
 
                /* we only save ue error information currently, ce is skipped */
                if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-                               == 1) {
-                       err_rec->address = err_addr;
-                       /* page frame address is saved */
-                       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-                       err_rec->ts = (uint64_t)ktime_get_real_seconds();
-                       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-                       err_rec->cu = 0;
-                       err_rec->mem_channel = channel_index;
-                       err_rec->mcumc_id = umc_inst;
-
-                       err_data->err_addr_cnt++;
-               }
+                               == 1)
+                       amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
        }
 
        /* clear umc status */
@@ -465,10 +453,14 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
                umc_v6_1_enable_umc_index_mode(adev);
 }
 
-const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
-       .err_cnt_init = umc_v6_1_err_cnt_init,
-       .ras_late_init = amdgpu_umc_ras_late_init,
-       .ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v6_1_ras_hw_ops = {
        .query_ras_error_count = umc_v6_1_query_ras_error_count,
        .query_ras_error_address = umc_v6_1_query_ras_error_address,
 };
+
+struct amdgpu_umc_ras umc_v6_1_ras = {
+       .ras_block = {
+               .hw_ops = &umc_v6_1_ras_hw_ops,
+       },
+       .err_cnt_init = umc_v6_1_err_cnt_init,
+};
\ No newline at end of file
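Each of the field-by-field blocks deleted in this series (umc v6.1 above, v6.7 and v8.7 below) collapses into a single call, so the helper's contract can be read straight off the '-' lines. A standalone reconstruction of what amdgpu_umc_fill_error_record plausibly does (struct layout, page shift, and error-type constant are assumptions lifted from the deleted code, not the kernel's exact definitions):

#include <stdint.h>
#include <time.h>

#define GPU_PAGE_SHIFT 12      /* assumed AMDGPU_GPU_PAGE_SHIFT */
#define ERR_NON_RECOVERABLE 1  /* placeholder for AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE */

struct eeprom_table_record {   /* fields taken from the deleted stores */
        uint64_t address;
        uint64_t retired_page;
        uint64_t ts;
        int err_type;
        uint32_t cu, mem_channel, mcumc_id;
};

struct ras_err_data {
        struct eeprom_table_record *err_addr;
        uint32_t err_addr_cnt;
};

static void fill_error_record(struct ras_err_data *err_data, uint64_t err_addr,
                              uint64_t retired_page, uint32_t channel_index,
                              uint32_t umc_inst)
{
        struct eeprom_table_record *rec = &err_data->err_addr[err_data->err_addr_cnt];

        rec->address = err_addr;
        rec->retired_page = retired_page >> GPU_PAGE_SHIFT; /* page frame address */
        rec->ts = (uint64_t)time(NULL);                     /* ktime_get_real_seconds() in-kernel */
        rec->err_type = ERR_NON_RECOVERABLE;
        rec->cu = 0;
        rec->mem_channel = channel_index;
        rec->mcumc_id = umc_inst;
        err_data->err_addr_cnt++;
}

int main(void)
{
        struct eeprom_table_record recs[4] = { 0 };
        struct ras_err_data data = { recs, 0 };

        fill_error_record(&data, 0xdeadbeefULL, 0x123456000ULL, 5, 1);
        return (int)data.err_addr_cnt; /* 1 */
}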
index 5dc36c730bb2a25d635042935f044a797b4799ee..50c632eb4cc6ef72a49c4df072b1863c0128e8f2 100644 (file)
@@ -45,7 +45,7 @@
 /* umc ce count initial value */
 #define UMC_V6_1_CE_CNT_INIT   (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
+extern struct amdgpu_umc_ras umc_v6_1_ras;
 extern const uint32_t
        umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
 
index 6dd1e19e8d43234231bfefc0b9bac7f600be6a86..87e4ef18e15121f4dfb9c8c89acfaa6f8df2bd34 100644 (file)
@@ -47,6 +47,13 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
                                              uint32_t umc_inst,
                                              uint32_t ch_inst)
 {
+       uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
+
+       /* adjust umc and channel index offset,
+        * the register address is not linear on each umc instance */
+       umc_inst = index / 4;
+       ch_inst = index % 4;
+
        return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
 }
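The linearize-then-resplit above effectively regroups channels four to a hardware instance before applying the strides. A standalone check of the arithmetic (channel_inst_num = 8 and the 0x400 per-channel stride come from the v6.7 header later in this series; UMC_V6_7_INST_DIST is not visible here, so its value below is a placeholder):

#include <stdint.h>
#include <stdio.h>

#define CHANNEL_INST_NUM   8        /* UMC_V6_7_CHANNEL_INSTANCE_NUM */
#define PER_CHANNEL_OFFSET 0x400    /* UMC_V6_7_PER_CHANNEL_OFFSET */
#define INST_DIST          0x40000  /* placeholder for UMC_V6_7_INST_DIST */

static uint32_t reg_offset(uint32_t umc_inst, uint32_t ch_inst)
{
        uint32_t index = umc_inst * CHANNEL_INST_NUM + ch_inst;

        /* register space is laid out four channels per hardware instance */
        umc_inst = index / 4;
        ch_inst  = index % 4;
        return PER_CHANNEL_OFFSET * ch_inst + INST_DIST * umc_inst;
}

int main(void)
{
        /* logical (umc 0, ch 4) lands in hardware instance 1, channel 0 */
        printf("0x%x\n", (unsigned)reg_offset(0, 4)); /* prints INST_DIST */
        printf("0x%x\n", (unsigned)reg_offset(1, 3)); /* index 11 -> inst 2, ch 3 */
        return 0;
}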
 
@@ -58,42 +65,33 @@ static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
 }
 
 static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
-                                                  uint32_t channel_index,
+                                                  uint32_t umc_inst, uint32_t ch_inst,
                                                   unsigned long *error_count)
 {
-       uint32_t ecc_err_cnt;
        uint64_t mc_umc_status;
+       uint32_t eccinfo_table_idx;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
-       /*
-        * select the lower chip and check the error count
-        * skip add error count, calc error counter only from mca_umc_status
-        */
-       ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip;
-
-       /*
-        * select the higher chip and check the err counter
-        * skip add error count, calc error counter only from mca_umc_status
-        */
-       ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip;
-
+       eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
        /* check for SRAM correctable error
         * MCUMC_STATUS is a 64 bit register */
-       mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+       mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
                *error_count += 1;
 }
 
 static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
-                                                     uint32_t channel_index,
+                                                         uint32_t umc_inst, uint32_t ch_inst,
                                                      unsigned long *error_count)
 {
        uint64_t mc_umc_status;
+       uint32_t eccinfo_table_idx;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
+       eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
        /* check the MCUMC_STATUS */
-       mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+       mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
        if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -110,42 +108,34 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
 
        uint32_t umc_inst        = 0;
        uint32_t ch_inst         = 0;
-       uint32_t umc_reg_offset  = 0;
-       uint32_t channel_index   = 0;
 
        /*TODO: driver needs to toggle DF Cstate to ensure
         * safe access of UMC registers. Will add the protection */
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
-                                                        umc_inst,
-                                                        ch_inst);
-               channel_index = get_umc_v6_7_channel_index(adev,
-                                                        umc_inst,
-                                                        ch_inst);
                umc_v6_7_ecc_info_query_correctable_error_count(adev,
-                                                     channel_index,
+                                                     umc_inst, ch_inst,
                                                      &(err_data->ce_count));
                umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev,
-                                                         channel_index,
+                                                     umc_inst, ch_inst,
                                                          &(err_data->ue_count));
        }
 }
 
 static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
                                         struct ras_err_data *err_data,
-                                        uint32_t umc_reg_offset,
                                         uint32_t ch_inst,
                                         uint32_t umc_inst)
 {
-       uint64_t mc_umc_status, err_addr, retired_page;
-       struct eeprom_table_record *err_rec;
+       uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
        uint32_t channel_index;
+       uint32_t eccinfo_table_idx;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
+       eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
        channel_index =
                adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
 
-       mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status;
+       mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
 
        if (mc_umc_status == 0)
                return;
@@ -153,34 +143,39 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
        if (!err_data->err_addr)
                return;
 
-       err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
        /* calculate error address if ue/ce error is detected */
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
            REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
 
-               err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr;
+               err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
                err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
                /* translate umc channel address to soc pa, 3 parts are included */
-               retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+               soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
                                ADDR_OF_256B_BLOCK(channel_index) |
                                OFFSET_IN_256B_BLOCK(err_addr);
 
+               /* The umc channel bits are not original values, they are hashed */
+               SET_CHANNEL_HASH(channel_index, soc_pa);
+
+               /* clear [C4 C3 C2] in soc physical address */
+               soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
                /* we only save ue error information currently, ce is skipped */
                if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
                                == 1) {
-                       err_rec->address = err_addr;
-                       /* page frame address is saved */
-                       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-                       err_rec->ts = (uint64_t)ktime_get_real_seconds();
-                       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-                       err_rec->cu = 0;
-                       err_rec->mem_channel = channel_index;
-                       err_rec->mcumc_id = umc_inst;
-
-                       err_data->err_addr_cnt++;
+                       /* loop for all possibilities of [C4 C3 C2] */
+                       for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+                               retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+                               amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
+
+                               /* shift R14 bit */
+                               retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+                               amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
+                       }
                }
        }
 }
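Since [C4 C3 C2] are cleared out of the SoC PA before retirement, each normalized address fans out to UMC_V6_7_NA_MAP_PA_NUM (8) column candidates, and the R14 flip inside the loop doubles that to 16, which is exactly UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL in the header below. A standalone enumeration of the candidate pages (bit positions are the header's values; the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define NA_MAP_PA_NUM 8   /* UMC_V6_7_NA_MAP_PA_NUM */
#define PA_C2_BIT     17  /* UMC_V6_7_PA_C2_BIT */
#define PA_R14_BIT    34  /* UMC_V6_7_PA_R14_BIT */

int main(void)
{
        uint64_t soc_pa = 0x123456789ULL;
        uint64_t column, page;
        int n = 0;

        soc_pa &= ~(0x7ULL << PA_C2_BIT); /* clear [C4 C3 C2] */

        for (column = 0; column < NA_MAP_PA_NUM; column++) {
                page = soc_pa | (column << PA_C2_BIT);
                printf("candidate %2d: 0x%llx\n", n++, (unsigned long long)page);

                page ^= 0x1ULL << PA_R14_BIT; /* R14-flipped variant */
                printf("candidate %2d: 0x%llx\n", n++, (unsigned long long)page);
        }
        return 0; /* 16 candidates == UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL */
}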
@@ -192,18 +187,13 @@ static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev
 
        uint32_t umc_inst        = 0;
        uint32_t ch_inst         = 0;
-       uint32_t umc_reg_offset  = 0;
 
        /*TODO: driver needs to toggle DF Cstate to ensure
         * safe access of UMC registers. Will add the protection
         * when firmware interface is ready */
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
-                                                        umc_inst,
-                                                        ch_inst);
                umc_v6_7_ecc_info_query_error_address(adev,
                                             err_data,
-                                            umc_reg_offset,
                                             ch_inst,
                                             umc_inst);
        }
@@ -365,9 +355,9 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
                                         uint32_t umc_inst)
 {
        uint32_t mc_umc_status_addr;
-       uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
-       struct eeprom_table_record *err_rec;
        uint32_t channel_index;
+       uint64_t mc_umc_status, mc_umc_addrt0;
+       uint64_t err_addr, soc_pa, retired_page, column;
 
        mc_umc_status_addr =
                SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -385,8 +375,6 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
                return;
        }
 
-       err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
        channel_index =
                adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
 
@@ -399,23 +387,30 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
                err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
                /* translate umc channel address to soc pa, 3 parts are included */
-               retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+               soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
                                ADDR_OF_256B_BLOCK(channel_index) |
                                OFFSET_IN_256B_BLOCK(err_addr);
 
+               /* The umc channel bits are not original values, they are hashed */
+               SET_CHANNEL_HASH(channel_index, soc_pa);
+
+               /* clear [C4 C3 C2] in soc physical address */
+               soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
                /* we only save ue error information currently, ce is skipped */
                if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
                                == 1) {
-                       err_rec->address = err_addr;
-                       /* page frame address is saved */
-                       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-                       err_rec->ts = (uint64_t)ktime_get_real_seconds();
-                       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-                       err_rec->cu = 0;
-                       err_rec->mem_channel = channel_index;
-                       err_rec->mcumc_id = umc_inst;
-
-                       err_data->err_addr_cnt++;
+                       /* loop for all possibilities of [C4 C3 C2] */
+                       for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+                               retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+                               amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
+
+                               /* shift R14 bit */
+                               retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+                               amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
+                       }
                }
        }
 
@@ -463,28 +458,24 @@ static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
 
 static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
 {
-       uint32_t umc_inst        = 0;
-       uint32_t ch_inst         = 0;
        uint32_t umc_reg_offset  = 0;
 
-       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
-                                                       umc_inst,
-                                                       ch_inst);
-               /* Enabling fatal error in one channel will be considered
-                  as fatal error mode */
-               if (umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset))
-                       return false;
-       }
-
-       return true;
+       /* If fatal error reporting is enabled on umc instance0 channel0,
+        * the whole device is considered to be in fatal error mode
+        */
+       umc_reg_offset = get_umc_v6_7_reg_offset(adev, 0, 0);
+       return !umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
 }
 
-const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
-       .ras_late_init = amdgpu_umc_ras_late_init,
-       .ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v6_7_ras_hw_ops = {
        .query_ras_error_count = umc_v6_7_query_ras_error_count,
        .query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
+
+struct amdgpu_umc_ras umc_v6_7_ras = {
+       .ras_block = {
+               .hw_ops = &umc_v6_7_ras_hw_ops,
+       },
        .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
        .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
        .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
index 57f2557e7acab1b7c1431d4971a1ddc3b574e83b..fe41ed2f5945119a357cee57d2a2052ed2746b69 100644 (file)
 #define UMC_V6_7_CHANNEL_INSTANCE_NUM          8
 /* total channel instances in one umc block */
 #define UMC_V6_7_TOTAL_CHANNEL_NUM     (UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM)
+/* one normalized address maps to 8 possible physical addresses */
+#define UMC_V6_7_NA_MAP_PA_NUM 8
+/* the R14 bit flip doubles the number of candidate bad pages */
+#define UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL      (UMC_V6_7_NA_MAP_PA_NUM * 2)
+/* The CH4 bit in SOC physical address */
+#define UMC_V6_7_PA_CH4_BIT    12
+/* The C2 bit in SOC physical address */
+#define UMC_V6_7_PA_C2_BIT     17
+/* The R14 bit in SOC physical address */
+#define UMC_V6_7_PA_R14_BIT    34
 /* UMC register per channel offset */
 #define UMC_V6_7_PER_CHANNEL_OFFSET            0x400
-extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+
+/* XOR bits 20, 25 and 34 of the PA into the CH4 bit (bit 12 of the PA);
+ * each hash bit only takes effect when the corresponding DF hash
+ * setting is enabled
+ */
+#define CHANNEL_HASH(channel_idx, pa) (((channel_idx) >> 4) ^ \
+                       (((pa)  >> 20) & 0x1ULL & adev->df.hash_status.hash_64k) ^ \
+                       (((pa)  >> 25) & 0x1ULL & adev->df.hash_status.hash_2m) ^ \
+                       (((pa)  >> 34) & 0x1ULL & adev->df.hash_status.hash_1g))
+#define SET_CHANNEL_HASH(channel_idx, pa) do { \
+               (pa) &= ~(0x1ULL << UMC_V6_7_PA_CH4_BIT); \
+               (pa) |= (CHANNEL_HASH(channel_idx, pa) << UMC_V6_7_PA_CH4_BIT); \
+       } while (0)
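To make the hashing concrete: SET_CHANNEL_HASH recomputes CH4 (PA bit 12) as channel_idx bit 4 XORed with PA bits 20, 25 and 34, each gated by the matching DF hash enable. A standalone evaluation with the enables pulled into a local struct (the flag values below are made up for the demo; in the driver they come from adev->df.hash_status):

#include <stdint.h>
#include <stdio.h>

#define PA_CH4_BIT 12 /* UMC_V6_7_PA_CH4_BIT */

struct hash_status { uint64_t hash_64k, hash_2m, hash_1g; };

static uint64_t set_channel_hash(uint32_t channel_idx, uint64_t pa,
                                 const struct hash_status *h)
{
        uint64_t hash;

        pa &= ~(0x1ULL << PA_CH4_BIT);               /* drop the old CH4 bit */
        hash = ((uint64_t)(channel_idx >> 4)) ^
               ((pa >> 20) & 0x1ULL & h->hash_64k) ^
               ((pa >> 25) & 0x1ULL & h->hash_2m) ^
               ((pa >> 34) & 0x1ULL & h->hash_1g);
        return pa | ((hash & 0x1ULL) << PA_CH4_BIT); /* fold hash into CH4 */
}

int main(void)
{
        struct hash_status h = { 1, 1, 0 }; /* demo: 64K and 2M hashing enabled */

        /* PA bits 20 and 25 set, channel bit 4 clear -> new CH4 = 0 ^ 1 ^ 1 = 0 */
        printf("0x%llx\n",
               (unsigned long long)set_channel_hash(0x3, 0x02101000ULL, &h));
        return 0;
}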
+
+extern struct amdgpu_umc_ras umc_v6_7_ras;
 extern const uint32_t
        umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM];
 extern const uint32_t
index af59a35788e3eefe68d6b98137fe541f0cbf4b2d..de85a998ef9944fe56e65584f134b511ea9f0357 100644 (file)
@@ -40,13 +40,144 @@ const uint32_t
                {9, 0},   {15, 6}
 };
 
-static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev,
+static inline uint32_t get_umc_v8_7_reg_offset(struct amdgpu_device *adev,
                                            uint32_t umc_inst,
                                            uint32_t ch_inst)
 {
        return adev->umc.channel_offs*ch_inst + UMC_8_INST_DIST*umc_inst;
 }
 
+static inline uint32_t get_umc_v8_7_channel_index(struct amdgpu_device *adev,
+                                               uint32_t umc_inst,
+                                               uint32_t ch_inst)
+{
+       return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+}
+
+static void umc_v8_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
+                                               uint32_t umc_inst, uint32_t ch_inst,
+                                               unsigned long *error_count)
+{
+       uint64_t mc_umc_status;
+       uint32_t eccinfo_table_idx;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
+
+       /* check for SRAM correctable error
+        * MCUMC_STATUS is a 64 bit register
+        */
+       mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+               *error_count += 1;
+}
+
+static void umc_v8_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
+                                                       uint32_t umc_inst, uint32_t ch_inst,
+                                                       unsigned long *error_count)
+{
+       uint64_t mc_umc_status;
+       uint32_t eccinfo_table_idx;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
+
+       /* check the MCUMC_STATUS */
+       mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+       if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+               *error_count += 1;
+}
+
+static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
+                                       void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+
+       /* TODO: driver needs to toggle DF Cstate to ensure
+        * safe access of UMC registers. Will add the protection
+        */
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_v8_7_ecc_info_query_correctable_error_count(adev,
+                                                       umc_inst, ch_inst,
+                                                       &(err_data->ce_count));
+               umc_v8_7_ecc_info_querry_uncorrectable_error_count(adev,
+                                                       umc_inst, ch_inst,
+                                                       &(err_data->ue_count));
+       }
+}
+
+static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev,
+                                       struct ras_err_data *err_data,
+                                       uint32_t ch_inst,
+                                       uint32_t umc_inst)
+{
+       uint64_t mc_umc_status, err_addr, retired_page;
+       uint32_t channel_index;
+       uint32_t eccinfo_table_idx;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+       eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
+       channel_index =
+               adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+       mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
+
+       if (mc_umc_status == 0)
+               return;
+
+       if (!err_data->err_addr)
+               return;
+
+       /* calculate error address if ue/ce error is detected */
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+               err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
+               err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+               /* translate umc channel address to soc pa, 3 parts are included */
+               retired_page = ADDR_OF_4KB_BLOCK(err_addr) |
+                               ADDR_OF_256B_BLOCK(channel_index) |
+                               OFFSET_IN_256B_BLOCK(err_addr);
+
+               /* we only save ue error information currently, ce is skipped */
+               if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+                               == 1)
+                       amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
+       }
+}
+
+static void umc_v8_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev,
+                                       void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+
+       /* TODO: driver needs to toggle DF Cstate to ensure
+        * safe access of UMC registers. Will add the protection
+        * when firmware interface is ready
+        */
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_v8_7_ecc_info_query_error_address(adev,
+                                               err_data,
+                                               ch_inst,
+                                               umc_inst);
+       }
+}
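Worth noting against the v6.7 path above: this v8.7 translation composes the SoC PA with ADDR_OF_4KB_BLOCK rather than ADDR_OF_8KB_BLOCK, and applies no channel hash or [C4 C3 C2] fan-out, presumably because the normalized-address granularity and interleaving differ on this UMC generation.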
+
 static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev,
                                        uint32_t umc_reg_offset)
 {
@@ -92,7 +223,7 @@ static void umc_v8_7_clear_error_count(struct amdgpu_device *adev)
        uint32_t umc_reg_offset  = 0;
 
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_8_reg_offset(adev,
+               umc_reg_offset = get_umc_v8_7_reg_offset(adev,
                                                umc_inst,
                                                ch_inst);
 
@@ -178,7 +309,7 @@ static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev,
        uint32_t umc_reg_offset  = 0;
 
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_8_reg_offset(adev,
+               umc_reg_offset = get_umc_v8_7_reg_offset(adev,
                                                      umc_inst,
                                                      ch_inst);
 
@@ -201,7 +332,6 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 {
        uint32_t lsb, mc_umc_status_addr;
        uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
-       struct eeprom_table_record *err_rec;
        uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
 
        mc_umc_status_addr =
@@ -220,8 +350,6 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
                return;
        }
 
-       err_rec = &err_data->err_addr[err_data->err_addr_cnt];
-
        /* calculate error address if ue/ce error is detected */
        if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
@@ -240,18 +368,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev,
 
                /* we only save ue error information currently, ce is skipped */
                if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
-                               == 1) {
-                       err_rec->address = err_addr;
-                       /* page frame address is saved */
-                       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
-                       err_rec->ts = (uint64_t)ktime_get_real_seconds();
-                       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
-                       err_rec->cu = 0;
-                       err_rec->mem_channel = channel_index;
-                       err_rec->mcumc_id = umc_inst;
-
-                       err_data->err_addr_cnt++;
-               }
+                               == 1)
+                       amdgpu_umc_fill_error_record(err_data, err_addr,
+                                       retired_page, channel_index, umc_inst);
        }
 
        /* clear umc status */
@@ -268,7 +387,7 @@ static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev,
        uint32_t umc_reg_offset  = 0;
 
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_8_reg_offset(adev,
+               umc_reg_offset = get_umc_v8_7_reg_offset(adev,
                                                      umc_inst,
                                                      ch_inst);
 
@@ -316,7 +435,7 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
        uint32_t umc_reg_offset  = 0;
 
        LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
-               umc_reg_offset = get_umc_8_reg_offset(adev,
+               umc_reg_offset = get_umc_v8_7_reg_offset(adev,
                                                      umc_inst,
                                                      ch_inst);
 
@@ -324,10 +443,16 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
-       .err_cnt_init = umc_v8_7_err_cnt_init,
-       .ras_late_init = amdgpu_umc_ras_late_init,
-       .ras_fini = amdgpu_umc_ras_fini,
+const struct amdgpu_ras_block_hw_ops umc_v8_7_ras_hw_ops = {
        .query_ras_error_count = umc_v8_7_query_ras_error_count,
        .query_ras_error_address = umc_v8_7_query_ras_error_address,
 };
+
+struct amdgpu_umc_ras umc_v8_7_ras = {
+       .ras_block = {
+               .hw_ops = &umc_v8_7_ras_hw_ops,
+       },
+       .err_cnt_init = umc_v8_7_err_cnt_init,
+       .ecc_info_query_ras_error_count = umc_v8_7_ecc_info_query_ras_error_count,
+       .ecc_info_query_ras_error_address = umc_v8_7_ecc_info_query_ras_error_address,
+};
index 37e6dc7c28e0d963f035a21d649a20824c7415d8..dd4993f5f78f4d8aad5bdd5cd907631680dd793c 100644 (file)
@@ -44,7 +44,7 @@
 /* umc ce count initial value */
 #define UMC_V8_7_CE_CNT_INIT   (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
+extern struct amdgpu_umc_ras umc_v8_7_ras;
 extern const uint32_t
        umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
 
index c4f3aff110728c858b76376cee668de45518c664..19cfbf9577b41bb48c767713d2343ab565c00ebf 100644 (file)
@@ -51,8 +51,6 @@ AMDKFD_FILES  := $(AMDKFD_PATH)/kfd_module.o \
                $(AMDKFD_PATH)/kfd_events.o \
                $(AMDKFD_PATH)/cik_event_interrupt.o \
                $(AMDKFD_PATH)/kfd_int_process_v9.o \
-               $(AMDKFD_PATH)/kfd_dbgdev.o \
-               $(AMDKFD_PATH)/kfd_dbgmgr.o \
                $(AMDKFD_PATH)/kfd_smi_events.o \
                $(AMDKFD_PATH)/kfd_crat.o
 
index d60576ce10cdfb6ecaa149f74d9c7589272f06ec..5c8023cba1961f40a78f22de007031090fa84efd 100644 (file)
@@ -110,7 +110,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
                struct kfd_vm_fault_info info;
 
                kfd_smi_event_update_vmfault(dev, pasid);
-               kfd_process_vm_fault(dev->dqm, pasid);
+               kfd_dqm_evict_pasid(dev->dqm, pasid);
 
                memset(&info, 0, sizeof(info));
                amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
index 4bfc0c8ab764be48fe14f6cb6b44b651a1b22723..54d997f304b516cc1b70a05e3dcbaa9174772297 100644 (file)
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/ptrace.h>
 #include <linux/dma-buf.h>
+#include <linux/fdtable.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
-#include "kfd_dbgmgr.h"
 #include "kfd_svm.h"
 #include "amdgpu_amdkfd.h"
 #include "kfd_smi_events.h"
+#include "amdgpu_dma_buf.h"
 
 static long kfd_ioctl(struct file *, unsigned int, unsigned long);
 static int kfd_open(struct inode *, struct file *);
@@ -292,14 +294,17 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
                return err;
 
        pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev) {
-               pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
-               return -EINVAL;
-       }
 
        mutex_lock(&p->mutex);
 
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       if (!pdd) {
+               pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
+               err = -EINVAL;
+               goto err_pdd;
+       }
+       dev = pdd->dev;
+
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
                err = -ESRCH;
@@ -310,7 +315,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
                        p->pasid,
                        dev->id);
 
-       err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+       err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, NULL, NULL, NULL,
                        &doorbell_offset_in_process);
        if (err != 0)
                goto err_create_queue;
@@ -344,6 +349,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 
 err_create_queue:
 err_bind_process:
+err_pdd:
        mutex_unlock(&p->mutex);
        return err;
 }
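This hunk establishes the pattern repeated through the rest of the file: the global kfd_device_by_id() lookup is replaced by a per-process lookup under p->mutex, so an ioctl can only name GPUs the process already has device data for, and every early exit funnels through labels that release the mutex. A compilable sketch of the lookup shape (struct fields and the helper body are assumptions inferred from the call sites, not the kernel's exact code):

#include <stdint.h>

/* minimal stand-ins for the kernel structs; fields are assumed */
struct kfd_dev { uint32_t id; };
struct kfd_process_device { struct kfd_dev *dev; };
struct kfd_process {
        int n_pdds;
        struct kfd_process_device *pdds[64];
};

/* caller is expected to hold p->mutex, as the ioctls above do */
static struct kfd_process_device *
process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
        for (int i = 0; i < p->n_pdds; i++) {
                struct kfd_process_device *pdd = p->pdds[i];

                if (pdd->dev->id == gpu_id)
                        return pdd;
        }
        return 0; /* callers translate this into -EINVAL */
}

int main(void)
{
        struct kfd_dev d0 = { 0x1002 }, d1 = { 0x1003 };
        struct kfd_process_device pd0 = { &d0 }, pd1 = { &d1 };
        struct kfd_process p = { 2, { &pd0, &pd1 } };

        return process_device_data_by_id(&p, 0x1003) == &pd1 ? 0 : 1;
}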
@@ -490,7 +496,6 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_set_memory_policy_args *args = data;
-       struct kfd_dev *dev;
        int err = 0;
        struct kfd_process_device *pdd;
        enum cache_policy default_policy, alternate_policy;
@@ -505,13 +510,15 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
                return -EINVAL;
        }
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
        mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       if (!pdd) {
+               pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
+               err = -EINVAL;
+               goto err_pdd;
+       }
 
-       pdd = kfd_bind_process_to_device(dev, p);
+       pdd = kfd_bind_process_to_device(pdd->dev, p);
        if (IS_ERR(pdd)) {
                err = -ESRCH;
                goto out;
@@ -524,7 +531,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
                (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                   ? cache_policy_coherent : cache_policy_noncoherent;
 
-       if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
+       if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
                                &pdd->qpd,
                                default_policy,
                                alternate_policy,
@@ -533,6 +540,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
                err = -EINVAL;
 
 out:
+err_pdd:
        mutex_unlock(&p->mutex);
 
        return err;
@@ -542,17 +550,18 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_set_trap_handler_args *args = data;
-       struct kfd_dev *dev;
        int err = 0;
        struct kfd_process_device *pdd;
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
        mutex_lock(&p->mutex);
 
-       pdd = kfd_bind_process_to_device(dev, p);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       if (!pdd) {
+               err = -EINVAL;
+               goto err_pdd;
+       }
+
+       pdd = kfd_bind_process_to_device(pdd->dev, p);
        if (IS_ERR(pdd)) {
                err = -ESRCH;
                goto out;
@@ -561,6 +570,7 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
        kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
 
 out:
+err_pdd:
        mutex_unlock(&p->mutex);
 
        return err;
@@ -569,289 +579,40 @@ out:
 static int kfd_ioctl_dbg_register(struct file *filep,
                                struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_dbg_register_args *args = data;
-       struct kfd_dev *dev;
-       struct kfd_dbgmgr *dbgmgr_ptr;
-       struct kfd_process_device *pdd;
-       bool create_ok;
-       long status = 0;
-
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
-       if (dev->adev->asic_type == CHIP_CARRIZO) {
-               pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
-               return -EINVAL;
-       }
-
-       mutex_lock(&p->mutex);
-       mutex_lock(kfd_get_dbgmgr_mutex());
-
-       /*
-        * make sure that we have pdd, if this the first queue created for
-        * this process
-        */
-       pdd = kfd_bind_process_to_device(dev, p);
-       if (IS_ERR(pdd)) {
-               status = PTR_ERR(pdd);
-               goto out;
-       }
-
-       if (!dev->dbgmgr) {
-               /* In case of a legal call, we have no dbgmgr yet */
-               create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
-               if (create_ok) {
-                       status = kfd_dbgmgr_register(dbgmgr_ptr, p);
-                       if (status != 0)
-                               kfd_dbgmgr_destroy(dbgmgr_ptr);
-                       else
-                               dev->dbgmgr = dbgmgr_ptr;
-               }
-       } else {
-               pr_debug("debugger already registered\n");
-               status = -EINVAL;
-       }
-
-out:
-       mutex_unlock(kfd_get_dbgmgr_mutex());
-       mutex_unlock(&p->mutex);
-
-       return status;
+       return -EPERM;
 }
 
 static int kfd_ioctl_dbg_unregister(struct file *filep,
                                struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_dbg_unregister_args *args = data;
-       struct kfd_dev *dev;
-       long status;
-
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev || !dev->dbgmgr)
-               return -EINVAL;
-
-       if (dev->adev->asic_type == CHIP_CARRIZO) {
-               pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
-               return -EINVAL;
-       }
-
-       mutex_lock(kfd_get_dbgmgr_mutex());
-
-       status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
-       if (!status) {
-               kfd_dbgmgr_destroy(dev->dbgmgr);
-               dev->dbgmgr = NULL;
-       }
-
-       mutex_unlock(kfd_get_dbgmgr_mutex());
-
-       return status;
+       return -EPERM;
 }
 
-/*
- * Parse and generate variable size data structure for address watch.
- * Total size of the buffer and # watch points is limited in order
- * to prevent kernel abuse. (no bearing to the much smaller HW limitation
- * which is enforced by dbgdev module)
- * please also note that the watch address itself are not "copied from user",
- * since it be set into the HW in user mode values.
- *
- */
 static int kfd_ioctl_dbg_address_watch(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_dbg_address_watch_args *args = data;
-       struct kfd_dev *dev;
-       struct dbg_address_watch_info aw_info;
-       unsigned char *args_buff;
-       long status;
-       void __user *cmd_from_user;
-       uint64_t watch_mask_value = 0;
-       unsigned int args_idx = 0;
-
-       memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
-
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
-       if (dev->adev->asic_type == CHIP_CARRIZO) {
-               pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
-               return -EINVAL;
-       }
-
-       cmd_from_user = (void __user *) args->content_ptr;
-
-       /* Validate arguments */
-
-       if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
-               (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
-               (cmd_from_user == NULL))
-               return -EINVAL;
-
-       /* this is the actual buffer to work with */
-       args_buff = memdup_user(cmd_from_user,
-                               args->buf_size_in_bytes - sizeof(*args));
-       if (IS_ERR(args_buff))
-               return PTR_ERR(args_buff);
-
-       aw_info.process = p;
-
-       aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
-       args_idx += sizeof(aw_info.num_watch_points);
-
-       aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
-       args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;
-
-       /*
-        * set watch address base pointer to point on the array base
-        * within args_buff
-        */
-       aw_info.watch_address = (uint64_t *) &args_buff[args_idx];
-
-       /* skip over the addresses buffer */
-       args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
-
-       if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
-               status = -EINVAL;
-               goto out;
-       }
-
-       watch_mask_value = (uint64_t) args_buff[args_idx];
-
-       if (watch_mask_value > 0) {
-               /*
-                * There is an array of masks.
-                * set watch mask base pointer to point on the array base
-                * within args_buff
-                */
-               aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];
-
-               /* skip over the masks buffer */
-               args_idx += sizeof(aw_info.watch_mask) *
-                               aw_info.num_watch_points;
-       } else {
-               /* just the NULL mask, set to NULL and skip over it */
-               aw_info.watch_mask = NULL;
-               args_idx += sizeof(aw_info.watch_mask);
-       }
-
-       if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
-               status = -EINVAL;
-               goto out;
-       }
-
-       /* Currently HSA Event is not supported for DBG */
-       aw_info.watch_event = NULL;
-
-       mutex_lock(kfd_get_dbgmgr_mutex());
-
-       status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);
-
-       mutex_unlock(kfd_get_dbgmgr_mutex());
-
-out:
-       kfree(args_buff);
-
-       return status;
+       return -EPERM;
 }
 
 /* The old wave-control debugger interface was removed; this stub always fails */
 static int kfd_ioctl_dbg_wave_control(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
-       struct kfd_ioctl_dbg_wave_control_args *args = data;
-       struct kfd_dev *dev;
-       struct dbg_wave_control_info wac_info;
-       unsigned char *args_buff;
-       uint32_t computed_buff_size;
-       long status;
-       void __user *cmd_from_user;
-       unsigned int args_idx = 0;
-
-       memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));
-
-       /* we use compact form, independent of the packing attribute value */
-       computed_buff_size = sizeof(*args) +
-                               sizeof(wac_info.mode) +
-                               sizeof(wac_info.operand) +
-                               sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
-                               sizeof(wac_info.dbgWave_msg.MemoryVA) +
-                               sizeof(wac_info.trapId);
-
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
-       if (dev->adev->asic_type == CHIP_CARRIZO) {
-               pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
-               return -EINVAL;
-       }
-
-       /* input size must match the computed "compact" size */
-       if (args->buf_size_in_bytes != computed_buff_size) {
-               pr_debug("size mismatch, computed : actual %u : %u\n",
-                               args->buf_size_in_bytes, computed_buff_size);
-               return -EINVAL;
-       }
-
-       cmd_from_user = (void __user *) args->content_ptr;
-
-       if (cmd_from_user == NULL)
-               return -EINVAL;
-
-       /* copy the entire buffer from user */
-
-       args_buff = memdup_user(cmd_from_user,
-                               args->buf_size_in_bytes - sizeof(*args));
-       if (IS_ERR(args_buff))
-               return PTR_ERR(args_buff);
-
-       /* move ptr to the start of the "pay-load" area */
-       wac_info.process = p;
-
-       wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
-       args_idx += sizeof(wac_info.operand);
-
-       wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
-       args_idx += sizeof(wac_info.mode);
-
-       wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
-       args_idx += sizeof(wac_info.trapId);
-
-       wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
-                                       *((uint32_t *)(&args_buff[args_idx]));
-       wac_info.dbgWave_msg.MemoryVA = NULL;
-
-       mutex_lock(kfd_get_dbgmgr_mutex());
-
-       pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
-                       wac_info.process, wac_info.operand,
-                       wac_info.mode, wac_info.trapId,
-                       wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
-
-       status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);
-
-       pr_debug("Returned status of dbg manager is %ld\n", status);
-
-       mutex_unlock(kfd_get_dbgmgr_mutex());
-
-       kfree(args_buff);
-
-       return status;
+       return -EPERM;
 }
 
 static int kfd_ioctl_get_clock_counters(struct file *filep,
                                struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_get_clock_counters_args *args = data;
-       struct kfd_dev *dev;
+       struct kfd_process_device *pdd;
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (dev)
+       mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       mutex_unlock(&p->mutex);
+       if (pdd)
                /* Reading GPU clock counter from KGD */
-               args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev);
+               args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
        else
                /* Node without GPU resource */
                args->gpu_clock_counter = 0;
@@ -1007,57 +768,11 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
         * through the event_page_offset field.
         */
        if (args->event_page_offset) {
-               struct kfd_dev *kfd;
-               struct kfd_process_device *pdd;
-               void *mem, *kern_addr;
-               uint64_t size;
-
-               kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
-               if (!kfd) {
-                       pr_err("Getting device by id failed in %s\n", __func__);
-                       return -EINVAL;
-               }
-
                mutex_lock(&p->mutex);
-
-               if (p->signal_page) {
-                       pr_err("Event page is already set\n");
-                       err = -EINVAL;
-                       goto out_unlock;
-               }
-
-               pdd = kfd_bind_process_to_device(kfd, p);
-               if (IS_ERR(pdd)) {
-                       err = PTR_ERR(pdd);
-                       goto out_unlock;
-               }
-
-               mem = kfd_process_device_translate_handle(pdd,
-                               GET_IDR_HANDLE(args->event_page_offset));
-               if (!mem) {
-                       pr_err("Can't find BO, offset is 0x%llx\n",
-                              args->event_page_offset);
-                       err = -EINVAL;
-                       goto out_unlock;
-               }
-
-               err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev,
-                                               mem, &kern_addr, &size);
-               if (err) {
-                       pr_err("Failed to map event page to kernel\n");
-                       goto out_unlock;
-               }
-
-               err = kfd_event_page_set(p, kern_addr, size);
-               if (err) {
-                       pr_err("Failed to set event page\n");
-                       amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem);
-                       goto out_unlock;
-               }
-
-               p->signal_handle = args->event_page_offset;
-
+               err = kfd_kmap_event_page(p, args->event_page_offset);
                mutex_unlock(&p->mutex);
+               if (err)
+                       return err;
        }
 
        err = kfd_event_create(filp, p, args->event_type,
@@ -1066,10 +781,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
                                &args->event_page_offset,
                                &args->event_slot_index);
 
-       return err;
-
-out_unlock:
-       mutex_unlock(&p->mutex);
+       pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
        return err;
 }
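The deleted block spells out everything the new kfd_kmap_event_page() has to cover: reject a second signal page, resolve and bind the device encoded in the offset, translate the IDR handle to a BO, map it into the kernel, and register it as the event page. A condensed sketch assembled from those '-' lines (helper names are the ones visible above; this is a reconstruction, not the kernel's exact body, and assumes the caller holds p->mutex as shown):

static int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
{
        struct kfd_process_device *pdd;
        void *mem, *kern_addr;
        uint64_t size;
        int err;

        if (p->signal_page)
                return -EINVAL; /* event page is already set */

        pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset));
        if (!pdd)
                return -EINVAL;

        pdd = kfd_bind_process_to_device(pdd->dev, p);
        if (IS_ERR(pdd))
                return PTR_ERR(pdd);

        mem = kfd_process_device_translate_handle(pdd,
                        GET_IDR_HANDLE(event_page_offset));
        if (!mem)
                return -EINVAL; /* can't find the BO */

        err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(pdd->dev->adev,
                        mem, &kern_addr, &size);
        if (err)
                return err;

        err = kfd_event_page_set(p, kern_addr, size);
        if (err) {
                amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(pdd->dev->adev, mem);
                return err;
        }

        p->signal_handle = event_page_offset;
        return 0;
}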
 
@@ -1118,11 +830,13 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
        struct kfd_dev *dev;
        long err;
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
        mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       if (!pdd) {
+               err = -EINVAL;
+               goto err_pdd;
+       }
+       dev = pdd->dev;
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
@@ -1142,6 +856,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
        return 0;
 
 bind_process_to_device_fail:
+err_pdd:
        mutex_unlock(&p->mutex);
        return err;
 }
@@ -1150,15 +865,17 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
                struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_get_tile_config_args *args = data;
-       struct kfd_dev *dev;
+       struct kfd_process_device *pdd;
        struct tile_config config;
        int err = 0;
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
+       mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       mutex_unlock(&p->mutex);
+       if (!pdd)
                return -EINVAL;
 
-       amdgpu_amdkfd_get_tile_config(dev->adev, &config);
+       amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
 
        args->gb_addr_config = config.gb_addr_config;
        args->num_banks = config.num_banks;
@@ -1193,40 +910,37 @@ static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
 {
        struct kfd_ioctl_acquire_vm_args *args = data;
        struct kfd_process_device *pdd;
-       struct kfd_dev *dev;
        struct file *drm_file;
        int ret;
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
        drm_file = fget(args->drm_fd);
        if (!drm_file)
                return -EINVAL;
 
        mutex_lock(&p->mutex);
-
-       pdd = kfd_get_process_device_data(dev, p);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
        if (!pdd) {
                ret = -EINVAL;
-               goto err_unlock;
+               goto err_pdd;
        }
 
        if (pdd->drm_file) {
                ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
-               goto err_unlock;
+               goto err_drm_file;
        }
 
        ret = kfd_process_device_init_vm(pdd, drm_file);
        if (ret)
                goto err_unlock;
+
        /* On success, the PDD keeps the drm_file reference */
        mutex_unlock(&p->mutex);
 
        return 0;
 
 err_unlock:
+err_pdd:
+err_drm_file:
        mutex_unlock(&p->mutex);
        fput(drm_file);
        return ret;
@@ -1283,19 +997,23 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
        }
        mutex_unlock(&p->svms.lock);
 #endif
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
+       mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       if (!pdd) {
+               err = -EINVAL;
+               goto err_pdd;
+       }
+
+       dev = pdd->dev;
 
        if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
                (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
                !kfd_dev_is_large_bar(dev)) {
                pr_err("Alloc host visible vram on small bar is not allowed\n");
-               return -EINVAL;
+               err = -EINVAL;
+               goto err_large_bar;
        }
 
-       mutex_lock(&p->mutex);
-
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
                err = PTR_ERR(pdd);
@@ -1323,7 +1041,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                dev->adev, args->va_addr, args->size,
                pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
-               flags);
+               flags, false);
 
        if (err)
                goto err_unlock;
@@ -1356,6 +1074,8 @@ err_free:
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
                                               pdd->drm_priv, NULL);
 err_unlock:
+err_pdd:
+err_large_bar:
        mutex_unlock(&p->mutex);
        return err;
 }
@@ -1366,14 +1086,9 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
        struct kfd_ioctl_free_memory_of_gpu_args *args = data;
        struct kfd_process_device *pdd;
        void *mem;
-       struct kfd_dev *dev;
        int ret;
        uint64_t size = 0;
 
-       dev = kfd_device_by_id(GET_GPU_ID(args->handle));
-       if (!dev)
-               return -EINVAL;
-
        mutex_lock(&p->mutex);
        /*
         * Safeguard to prevent user space from freeing signal BO.
@@ -1385,11 +1100,11 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
                goto err_unlock;
        }
 
-       pdd = kfd_get_process_device_data(dev, p);
+       pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
        if (!pdd) {
                pr_err("Process device data doesn't exist\n");
                ret = -EINVAL;
-               goto err_unlock;
+               goto err_pdd;
        }
 
        mem = kfd_process_device_translate_handle(
@@ -1399,7 +1114,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
                goto err_unlock;
        }
 
-       ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev,
+       ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
                                (struct kgd_mem *)mem, pdd->drm_priv, &size);
 
        /* If freeing the buffer failed, leave the handle in place for
@@ -1412,26 +1127,30 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
        WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
 
 err_unlock:
+err_pdd:
        mutex_unlock(&p->mutex);
        return ret;
 }
 
+static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
+{
+       return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+              (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
+               dev->adev->sdma.instance[0].fw_version >= 18) ||
+              KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
+}
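Read together with its two call sites below: on GC 9.4.0, 9.4.1 (with SDMA firmware >= 18) and 9.4.2 the driver performs a heavyweight TLB flush after unmapping, so the map path only flushes when a page-table level was actually freed; on every other ASIC the unmap-time flush is skipped and the map path flushes unconditionally, which is what the added "|| !kfd_flush_tlb_after_unmap(dev)" term accomplishes.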
+
 static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
                                        struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_map_memory_to_gpu_args *args = data;
        struct kfd_process_device *pdd, *peer_pdd;
        void *mem;
-       struct kfd_dev *dev, *peer;
+       struct kfd_dev *dev;
        long err = 0;
        int i;
        uint32_t *devices_arr = NULL;
        bool table_freed = false;
 
-       dev = kfd_device_by_id(GET_GPU_ID(args->handle));
-       if (!dev)
-               return -EINVAL;
-
        if (!args->n_devices) {
                pr_debug("Device IDs array empty\n");
                return -EINVAL;
@@ -1455,6 +1174,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        }
 
        mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
+       if (!pdd) {
+               err = -EINVAL;
+               goto get_process_device_data_failed;
+       }
+       dev = pdd->dev;
 
        pdd = kfd_bind_process_to_device(dev, p);
        if (IS_ERR(pdd)) {
@@ -1470,21 +1195,22 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        }
 
        for (i = args->n_success; i < args->n_devices; i++) {
-               peer = kfd_device_by_id(devices_arr[i]);
-               if (!peer) {
+               peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+               if (!peer_pdd) {
                        pr_debug("Getting device by id failed for 0x%x\n",
                                 devices_arr[i]);
                        err = -EINVAL;
                        goto get_mem_obj_from_handle_failed;
                }
 
-               peer_pdd = kfd_bind_process_to_device(peer, p);
+               peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
                if (IS_ERR(peer_pdd)) {
                        err = PTR_ERR(peer_pdd);
                        goto get_mem_obj_from_handle_failed;
                }
+
                err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-                       peer->adev, (struct kgd_mem *)mem,
+                       peer_pdd->dev->adev, (struct kgd_mem *)mem,
                        peer_pdd->drm_priv, &table_freed);
                if (err) {
                        pr_err("Failed to map to gpu %d/%d\n",
@@ -1503,12 +1229,9 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        }
 
        /* Flush TLBs after waiting for the page table updates to complete */
-       if (table_freed) {
+       if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
                for (i = 0; i < args->n_devices; i++) {
-                       peer = kfd_device_by_id(devices_arr[i]);
-                       if (WARN_ON_ONCE(!peer))
-                               continue;
-                       peer_pdd = kfd_get_process_device_data(peer, p);
+                       peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
                        if (WARN_ON_ONCE(!peer_pdd))
                                continue;
                        kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
@@ -1518,6 +1241,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 
        return err;
 
+get_process_device_data_failed:
 bind_process_to_device_failed:
 get_mem_obj_from_handle_failed:
 map_memory_to_gpu_failed:
@@ -1535,14 +1259,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
        struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
        struct kfd_process_device *pdd, *peer_pdd;
        void *mem;
-       struct kfd_dev *dev, *peer;
        long err = 0;
        uint32_t *devices_arr = NULL, i;
 
-       dev = kfd_device_by_id(GET_GPU_ID(args->handle));
-       if (!dev)
-               return -EINVAL;
-
        if (!args->n_devices) {
                pr_debug("Device IDs array empty\n");
                return -EINVAL;
@@ -1566,8 +1285,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
        }
 
        mutex_lock(&p->mutex);
-
-       pdd = kfd_get_process_device_data(dev, p);
+       pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
        if (!pdd) {
                err = -EINVAL;
                goto bind_process_to_device_failed;
@@ -1581,19 +1299,13 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
        }
 
        for (i = args->n_success; i < args->n_devices; i++) {
-               peer = kfd_device_by_id(devices_arr[i]);
-               if (!peer) {
-                       err = -EINVAL;
-                       goto get_mem_obj_from_handle_failed;
-               }
-
-               peer_pdd = kfd_get_process_device_data(peer, p);
+               peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
                if (!peer_pdd) {
-                       err = -ENODEV;
+                       err = -EINVAL;
                        goto get_mem_obj_from_handle_failed;
                }
                err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-                       peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
+                       peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
                if (err) {
                        pr_err("Failed to unmap from gpu %d/%d\n",
                               i, args->n_devices);
@@ -1603,8 +1315,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
        }
        mutex_unlock(&p->mutex);
 
-       if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) {
-               err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
+       if (kfd_flush_tlb_after_unmap(pdd->dev)) {
+               err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
                                (struct kgd_mem *) mem, true);
                if (err) {
                        pr_debug("Sync memory failed, wait interrupted by user signal\n");
@@ -1613,10 +1325,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 
                /* Flush TLBs after waiting for the page table updates to complete */
                for (i = 0; i < args->n_devices; i++) {
-                       peer = kfd_device_by_id(devices_arr[i]);
-                       if (WARN_ON_ONCE(!peer))
-                               continue;
-                       peer_pdd = kfd_get_process_device_data(peer, p);
+                       peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
                        if (WARN_ON_ONCE(!peer_pdd))
                                continue;
                        kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
@@ -1736,29 +1445,29 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
        struct kfd_ioctl_import_dmabuf_args *args = data;
        struct kfd_process_device *pdd;
        struct dma_buf *dmabuf;
-       struct kfd_dev *dev;
        int idr_handle;
        uint64_t size;
        void *mem;
        int r;
 
-       dev = kfd_device_by_id(args->gpu_id);
-       if (!dev)
-               return -EINVAL;
-
        dmabuf = dma_buf_get(args->dmabuf_fd);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);
 
        mutex_lock(&p->mutex);
+       pdd = kfd_process_device_data_by_id(p, args->gpu_id);
+       if (!pdd) {
+               r = -EINVAL;
+               goto err_unlock;
+       }
 
-       pdd = kfd_bind_process_to_device(dev, p);
+       pdd = kfd_bind_process_to_device(pdd->dev, p);
        if (IS_ERR(pdd)) {
                r = PTR_ERR(pdd);
                goto err_unlock;
        }
 
-       r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf,
+       r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
                                              args->va_addr, pdd->drm_priv,
                                              (struct kgd_mem **)&mem, &size,
                                              NULL);
@@ -1779,7 +1488,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
        return 0;
 
 err_free:
-       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
                                               pdd->drm_priv, NULL);
 err_unlock:
        mutex_unlock(&p->mutex);
@@ -1792,13 +1501,16 @@ static int kfd_ioctl_smi_events(struct file *filep,
                                struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_smi_events_args *args = data;
-       struct kfd_dev *dev;
+       struct kfd_process_device *pdd;
 
-       dev = kfd_device_by_id(args->gpuid);
-       if (!dev)
+       mutex_lock(&p->mutex);
+
+       pdd = kfd_process_device_data_by_id(p, args->gpuid);
+       mutex_unlock(&p->mutex);
+       if (!pdd)
                return -EINVAL;
 
-       return kfd_smi_event_open(dev, &args->anon_fd);
+       return kfd_smi_event_open(pdd->dev, &args->anon_fd);
 }
 
 static int kfd_ioctl_set_xnack_mode(struct file *filep,
@@ -1840,13 +1552,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
        if (!args->start_addr || !args->size)
                return -EINVAL;
 
-       mutex_lock(&p->mutex);
-
        r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
                      args->attrs);
 
-       mutex_unlock(&p->mutex);
-
        return r;
 }
 #else
@@ -1856,13 +1564,1031 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
 }
 #endif
 
-#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
-       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
-                           .cmd_drv = 0, .name = #ioctl}
+static int criu_checkpoint_process(struct kfd_process *p,
+                            uint8_t __user *user_priv_data,
+                            uint64_t *priv_offset)
+{
+       struct kfd_criu_process_priv_data process_priv;
+       int ret;
 
-/** Ioctl table */
-static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
-       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+       memset(&process_priv, 0, sizeof(process_priv));
+
+       process_priv.version = KFD_CRIU_PRIV_VERSION;
+       /* For CRIU, we don't consider the negative xnack mode, which is only
+        * used to query the mode without changing it. Here 0 simply means
+        * disabled and 1 means enabled, i.e. retry to find a valid PTE on a
+        * recoverable fault.
+        */
+       process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
+
+       ret = copy_to_user(user_priv_data + *priv_offset,
+                               &process_priv, sizeof(process_priv));
+
+       if (ret) {
+               pr_err("Failed to copy process information to user\n");
+               ret = -EFAULT;
+       }
+
+       *priv_offset += sizeof(process_priv);
+       return ret;
+}
+
+static int criu_checkpoint_devices(struct kfd_process *p,
+                            uint32_t num_devices,
+                            uint8_t __user *user_addr,
+                            uint8_t __user *user_priv_data,
+                            uint64_t *priv_offset)
+{
+       struct kfd_criu_device_priv_data *device_priv = NULL;
+       struct kfd_criu_device_bucket *device_buckets = NULL;
+       int ret = 0, i;
+
+       device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
+       if (!device_buckets) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
+       if (!device_priv) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       for (i = 0; i < num_devices; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               device_buckets[i].user_gpu_id = pdd->user_gpu_id;
+               device_buckets[i].actual_gpu_id = pdd->dev->id;
+
+               /*
+                * priv_data does not contain useful information for now and is reserved for
+                * future use, so we do not set its contents.
+                */
+       }
+
+       ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
+       if (ret) {
+               pr_err("Failed to copy device information to user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       ret = copy_to_user(user_priv_data + *priv_offset,
+                          device_priv,
+                          num_devices * sizeof(*device_priv));
+       if (ret) {
+               pr_err("Failed to copy device information to user\n");
+               ret = -EFAULT;
+       }
+       *priv_offset += num_devices * sizeof(*device_priv);
+
+exit:
+       kvfree(device_buckets);
+       kvfree(device_priv);
+       return ret;
+}
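+
+/*
+ * The device buckets persist the mapping between the gpu_id the application
+ * saw when it was checkpointed (user_gpu_id) and the gpu_id of the device
+ * backing it on the current system (actual_gpu_id). criu_restore_devices()
+ * walks the same buckets in the other direction, attaching each user_gpu_id
+ * to whatever device the restore target provides.
+ */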
+
+static uint32_t get_process_num_bos(struct kfd_process *p)
+{
+       uint32_t num_of_bos = 0;
+       int i;
+
+       /* Run over all PDDs of the process */
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+               void *mem;
+               int id;
+
+               idr_for_each_entry(&pdd->alloc_idr, mem, id) {
+                       struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
+
+                       if ((uint64_t)kgd_mem->va > pdd->gpuvm_base)
+                               num_of_bos++;
+               }
+       }
+       return num_of_bos;
+}
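+
+/*
+ * Note (an assumption, not stated in the patch): BOs whose VA is at or below
+ * pdd->gpuvm_base appear to be KFD-internal reservations that are recreated
+ * on restore anyway, so only user allocations above the base are counted
+ * here; criu_checkpoint_bos() applies the same filter.
+ */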
+
+static int criu_get_prime_handle(struct drm_gem_object *gobj, int flags,
+                                     u32 *shared_fd)
+{
+       struct dma_buf *dmabuf;
+       int ret;
+
+       dmabuf = amdgpu_gem_prime_export(gobj, flags);
+       if (IS_ERR(dmabuf)) {
+               ret = PTR_ERR(dmabuf);
+               pr_err("dmabuf export failed for the BO\n");
+               return ret;
+       }
+
+       ret = dma_buf_fd(dmabuf, flags);
+       if (ret < 0) {
+               pr_err("dmabuf create fd failed, ret:%d\n", ret);
+               goto out_free_dmabuf;
+       }
+
+       *shared_fd = ret;
+       return 0;
+
+out_free_dmabuf:
+       dma_buf_put(dmabuf);
+       return ret;
+}
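+
+/*
+ * Illustrative usage: VRAM BO contents are not dumped through the process
+ * address space; instead each VRAM BO is exported as a dma-buf whose fd is
+ * handed to the CRIU plugin through the bucket, presumably so the plugin can
+ * read the contents itself:
+ *
+ *	u32 fd;
+ *	if (!criu_get_prime_handle(&bo->tbo.base, DRM_RDWR, &fd))
+ *		bo_bucket->dmabuf_fd = fd;	// caller owns fd, close_fd() on error
+ */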
+
+static int criu_checkpoint_bos(struct kfd_process *p,
+                              uint32_t num_bos,
+                              uint8_t __user *user_bos,
+                              uint8_t __user *user_priv_data,
+                              uint64_t *priv_offset)
+{
+       struct kfd_criu_bo_bucket *bo_buckets;
+       struct kfd_criu_bo_priv_data *bo_privs;
+       int ret = 0, pdd_index, bo_index = 0, id;
+       void *mem;
+
+       bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
+       if (!bo_buckets)
+               return -ENOMEM;
+
+       bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
+       if (!bo_privs) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
+               struct kfd_process_device *pdd = p->pdds[pdd_index];
+               struct amdgpu_bo *dumper_bo;
+               struct kgd_mem *kgd_mem;
+
+               idr_for_each_entry(&pdd->alloc_idr, mem, id) {
+                       struct kfd_criu_bo_bucket *bo_bucket;
+                       struct kfd_criu_bo_priv_data *bo_priv;
+                       int i, dev_idx = 0;
+
+                       if (!mem) {
+                               ret = -ENOMEM;
+                               goto exit;
+                       }
+
+                       kgd_mem = (struct kgd_mem *)mem;
+                       dumper_bo = kgd_mem->bo;
+
+                       if ((uint64_t)kgd_mem->va <= pdd->gpuvm_base)
+                               continue;
+
+                       bo_bucket = &bo_buckets[bo_index];
+                       bo_priv = &bo_privs[bo_index];
+
+                       bo_bucket->gpu_id = pdd->user_gpu_id;
+                       bo_bucket->addr = (uint64_t)kgd_mem->va;
+                       bo_bucket->size = amdgpu_bo_size(dumper_bo);
+                       bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
+                       bo_priv->idr_handle = id;
+
+                       if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
+                               ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
+                                                               &bo_priv->user_addr);
+                               if (ret) {
+                                       pr_err("Failed to obtain user address for user-pointer bo\n");
+                                       goto exit;
+                               }
+                       }
+                       if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+                               ret = criu_get_prime_handle(&dumper_bo->tbo.base,
+                                               bo_bucket->alloc_flags &
+                                               KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
+                                               &bo_bucket->dmabuf_fd);
+                               if (ret)
+                                       goto exit;
+                       }
+                       if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
+                               bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
+                                       KFD_MMAP_GPU_ID(pdd->dev->id);
+                       else if (bo_bucket->alloc_flags &
+                               KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
+                               bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
+                                       KFD_MMAP_GPU_ID(pdd->dev->id);
+                       else
+                               bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
+
+                       for (i = 0; i < p->n_pdds; i++) {
+                               if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
+                                       bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
+                       }
+
+                       pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
+                                       "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
+                                       bo_bucket->size,
+                                       bo_bucket->addr,
+                                       bo_bucket->offset,
+                                       bo_bucket->gpu_id,
+                                       bo_bucket->alloc_flags,
+                                       bo_priv->idr_handle);
+                       bo_index++;
+               }
+       }
+
+       ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
+       if (ret) {
+               pr_err("Failed to copy BO information to user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
+       if (ret) {
+               pr_err("Failed to copy BO priv information to user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       *priv_offset += num_bos * sizeof(*bo_privs);
+
+exit:
+       while (ret && bo_index--) {
+               if (bo_buckets[bo_index].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+                       close_fd(bo_buckets[bo_index].dmabuf_fd);
+       }
+
+       kvfree(bo_buckets);
+       kvfree(bo_privs);
+       return ret;
+}
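+
+/*
+ * The unwind loop above runs only on error and closes every dma-buf fd
+ * installed so far; on success, ownership of the fds passes to user space.
+ * criu_checkpoint() performs the same cleanup if a later stage fails.
+ */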
+
+static int criu_get_process_object_info(struct kfd_process *p,
+                                       uint32_t *num_devices,
+                                       uint32_t *num_bos,
+                                       uint32_t *num_objects,
+                                       uint64_t *objs_priv_size)
+{
+       uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
+       uint32_t num_queues, num_events, num_svm_ranges;
+       int ret;
+
+       *num_devices = p->n_pdds;
+       *num_bos = get_process_num_bos(p);
+
+       ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
+       if (ret)
+               return ret;
+
+       num_events = kfd_get_num_events(p);
+
+       ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
+       if (ret)
+               return ret;
+
+       *num_objects = num_queues + num_events + num_svm_ranges;
+
+       if (objs_priv_size) {
+               priv_size = sizeof(struct kfd_criu_process_priv_data);
+               priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
+               priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
+               priv_size += queues_priv_data_size;
+               priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
+               priv_size += svm_priv_data_size;
+               *objs_priv_size = priv_size;
+       }
+       return 0;
+}
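+
+/*
+ * The private data blob is one flat stream that each checkpoint helper
+ * appends to while advancing *priv_offset:
+ *
+ *	| process | device[num_devices] | bo[num_bos] | queues | events | svm |
+ *
+ * The size computed here must match the priv_data_size user space passes
+ * back with the CHECKPOINT op, otherwise criu_checkpoint() fails with
+ * -EINVAL.
+ */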
+
+static int criu_checkpoint(struct file *filep,
+                          struct kfd_process *p,
+                          struct kfd_ioctl_criu_args *args)
+{
+       int ret;
+       uint32_t num_devices, num_bos, num_objects;
+       uint64_t priv_size, priv_offset = 0;
+
+       if (!args->devices || !args->bos || !args->priv_data)
+               return -EINVAL;
+
+       mutex_lock(&p->mutex);
+
+       if (!p->n_pdds) {
+               pr_err("No pdd for given process\n");
+               ret = -ENODEV;
+               goto exit_unlock;
+       }
+
+       /* Confirm all process queues are evicted */
+       if (!p->queues_paused) {
+               pr_err("Cannot dump process when queues are not in evicted state\n");
+               /* CRIU plugin did not call op PROCESS_INFO before checkpointing */
+               ret = -EINVAL;
+               goto exit_unlock;
+       }
+
+       ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
+       if (ret)
+               goto exit_unlock;
+
+       if (num_devices != args->num_devices ||
+           num_bos != args->num_bos ||
+           num_objects != args->num_objects ||
+           priv_size != args->priv_data_size) {
+
+               ret = -EINVAL;
+               goto exit_unlock;
+       }
+
+       /* each function will store private data inside priv_data and adjust priv_offset */
+       ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
+       if (ret)
+               goto exit_unlock;
+
+       ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
+                               (uint8_t __user *)args->priv_data, &priv_offset);
+       if (ret)
+               goto exit_unlock;
+
+       ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
+                           (uint8_t __user *)args->priv_data, &priv_offset);
+       if (ret)
+               goto exit_unlock;
+
+       if (num_objects) {
+               ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
+                                                &priv_offset);
+               if (ret)
+                       goto close_bo_fds;
+
+               ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
+                                                &priv_offset);
+               if (ret)
+                       goto close_bo_fds;
+
+               ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
+               if (ret)
+                       goto close_bo_fds;
+       }
+
+close_bo_fds:
+       if (ret) {
+               /* If the IOCTL returns an error, user space assumes all fds
+                * opened in criu_checkpoint_bos are closed.
+                */
+               uint32_t i;
+               struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos;
+
+               for (i = 0; i < num_bos; i++) {
+                       if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+                               close_fd(bo_buckets[i].dmabuf_fd);
+               }
+       }
+
+exit_unlock:
+       mutex_unlock(&p->mutex);
+       if (ret)
+               pr_err("Failed to dump CRIU ret:%d\n", ret);
+       else
+               pr_debug("CRIU dump ret:%d\n", ret);
+
+       return ret;
+}
+
+static int criu_restore_process(struct kfd_process *p,
+                               struct kfd_ioctl_criu_args *args,
+                               uint64_t *priv_offset,
+                               uint64_t max_priv_data_size)
+{
+       int ret = 0;
+       struct kfd_criu_process_priv_data process_priv;
+
+       if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
+               return -EINVAL;
+
+       ret = copy_from_user(&process_priv,
+                               (void __user *)(args->priv_data + *priv_offset),
+                               sizeof(process_priv));
+       if (ret) {
+               pr_err("Failed to copy process private information from user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+       *priv_offset += sizeof(process_priv);
+
+       if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
+               pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
+                       process_priv.version, KFD_CRIU_PRIV_VERSION);
+               return -EINVAL;
+       }
+
+       pr_debug("Setting XNACK mode\n");
+       if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
+               pr_err("xnack mode cannot be set\n");
+               ret = -EPERM;
+               goto exit;
+       } else {
+               pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
+               p->xnack_enabled = process_priv.xnack_mode;
+       }
+
+exit:
+       return ret;
+}
+
+static int criu_restore_devices(struct kfd_process *p,
+                               struct kfd_ioctl_criu_args *args,
+                               uint64_t *priv_offset,
+                               uint64_t max_priv_data_size)
+{
+       struct kfd_criu_device_bucket *device_buckets;
+       struct kfd_criu_device_priv_data *device_privs;
+       int ret = 0;
+       uint32_t i;
+
+       if (args->num_devices != p->n_pdds)
+               return -EINVAL;
+
+       if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
+               return -EINVAL;
+
+       device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
+       if (!device_buckets)
+               return -ENOMEM;
+
+       ret = copy_from_user(device_buckets, (void __user *)args->devices,
+                               args->num_devices * sizeof(*device_buckets));
+       if (ret) {
+               pr_err("Failed to copy devices buckets from user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       for (i = 0; i < args->num_devices; i++) {
+               struct kfd_dev *dev;
+               struct kfd_process_device *pdd;
+               struct file *drm_file;
+
+               /* device private data is not currently used */
+
+               if (!device_buckets[i].user_gpu_id) {
+                       pr_err("Invalid user gpu_id\n");
+                       ret = -EINVAL;
+                       goto exit;
+               }
+
+               dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
+               if (!dev) {
+                       pr_err("Failed to find device with gpu_id = %x\n",
+                               device_buckets[i].actual_gpu_id);
+                       ret = -EINVAL;
+                       goto exit;
+               }
+
+               pdd = kfd_get_process_device_data(dev, p);
+               if (!pdd) {
+                       pr_err("Failed to get pdd for gpu_id = %x\n",
+                                       device_buckets[i].actual_gpu_id);
+                       ret = -EINVAL;
+                       goto exit;
+               }
+               pdd->user_gpu_id = device_buckets[i].user_gpu_id;
+
+               drm_file = fget(device_buckets[i].drm_fd);
+               if (!drm_file) {
+                       pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
+                               device_buckets[i].drm_fd);
+                       ret = -EINVAL;
+                       goto exit;
+               }
+
+               if (pdd->drm_file) {
+                       ret = -EINVAL;
+                       goto exit;
+               }
+
+               /* create the vm using render nodes for kfd pdd */
+               if (kfd_process_device_init_vm(pdd, drm_file)) {
+                       pr_err("could not init vm for given pdd\n");
+                       /* On success the PDD keeps the drm_file reference,
+                        * so drop ours only on this failure path.
+                        */
+                       fput(drm_file);
+                       ret = -EINVAL;
+                       goto exit;
+               }
+               /*
+                * pdd already has the VM bound to the render node, so the
+                * call below won't create a new exclusive KFD mapping and
+                * will reuse the existing renderDXXX one; it is still needed
+                * for IOMMUv2 binding and runtime PM.
+                */
+               pdd = kfd_bind_process_to_device(dev, p);
+               if (IS_ERR(pdd)) {
+                       ret = PTR_ERR(pdd);
+                       goto exit;
+               }
+       }
+
+       /*
+        * We are not copying device private data from user as it is unused
+        * for now, but we still advance priv_offset past it.
+        */
+       *priv_offset += args->num_devices * sizeof(*device_privs);
+
+exit:
+       kfree(device_buckets);
+       return ret;
+}
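+
+/*
+ * Note (inferred from the error message above, not spelled out in the
+ * patch): drm_fd is expected to be a render-node fd that the CRIU plugin
+ * opened inside the restored task before issuing this op; adopting that
+ * file lets the restored process keep using its pre-checkpoint renderDXXX
+ * mapping.
+ */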
+
+static int criu_restore_bos(struct kfd_process *p,
+                           struct kfd_ioctl_criu_args *args,
+                           uint64_t *priv_offset,
+                           uint64_t max_priv_data_size)
+{
+       struct kfd_criu_bo_bucket *bo_buckets;
+       struct kfd_criu_bo_priv_data *bo_privs;
+       const bool criu_resume = true;
+       bool flush_tlbs = false;
+       int ret = 0, j = 0;
+       uint32_t i = 0;
+
+       if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
+               return -EINVAL;
+
+       /* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
+       amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
+
+       bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
+       if (!bo_buckets)
+               return -ENOMEM;
+
+       ret = copy_from_user(bo_buckets, (void __user *)args->bos,
+                            args->num_bos * sizeof(*bo_buckets));
+       if (ret) {
+               pr_err("Failed to copy BOs information from user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
+       if (!bo_privs) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
+                            args->num_bos * sizeof(*bo_privs));
+       if (ret) {
+               pr_err("Failed to copy BOs information from user\n");
+               ret = -EFAULT;
+               goto exit;
+       }
+       *priv_offset += args->num_bos * sizeof(*bo_privs);
+
+       /* Create and map new BOs */
+       for (; i < args->num_bos; i++) {
+               struct kfd_criu_bo_bucket *bo_bucket;
+               struct kfd_criu_bo_priv_data *bo_priv;
+               struct kfd_dev *dev;
+               struct kfd_process_device *pdd;
+               struct kgd_mem *kgd_mem;
+               void *mem;
+               u64 offset;
+               int idr_handle;
+
+               bo_bucket = &bo_buckets[i];
+               bo_priv = &bo_privs[i];
+
+               pr_debug("kfd restore ioctl - bo_bucket[%d]:\n", i);
+               pr_debug("size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
+                       "gpu_id = 0x%x alloc_flags = 0x%x\n"
+                       "idr_handle = 0x%x\n",
+                       bo_bucket->size,
+                       bo_bucket->addr,
+                       bo_bucket->offset,
+                       bo_bucket->gpu_id,
+                       bo_bucket->alloc_flags,
+                       bo_priv->idr_handle);
+
+               pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
+               if (!pdd) {
+                       pr_err("Failed to get pdd\n");
+                       ret = -ENODEV;
+                       goto exit;
+               }
+               dev = pdd->dev;
+
+               if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+                       pr_debug("restore ioctl: KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL\n");
+                       if (bo_bucket->size != kfd_doorbell_process_slice(dev)) {
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+                       offset = kfd_get_process_doorbells(pdd);
+               } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+                       /* MMIO BOs need remapped bus address */
+                       pr_debug("restore ioctl :KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP\n");
+                       if (bo_bucket->size != PAGE_SIZE) {
+                               pr_err("Invalid page size\n");
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+                       offset = dev->adev->rmmio_remap.bus_addr;
+                       if (!offset) {
+                               pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
+                               ret = -ENOMEM;
+                               goto exit;
+                       }
+               } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
+                       offset = bo_priv->user_addr;
+               }
+               /* Create the BO */
+               ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(dev->adev,
+                                               bo_bucket->addr,
+                                               bo_bucket->size,
+                                               pdd->drm_priv,
+                                               (struct kgd_mem **) &mem,
+                                               &offset,
+                                               bo_bucket->alloc_flags,
+                                               criu_resume);
+               if (ret) {
+                       pr_err("Could not create the BO\n");
+                       ret = -ENOMEM;
+                       goto exit;
+               }
+               pr_debug("New BO created: size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n",
+                       bo_bucket->size, bo_bucket->addr, offset);
+
+               /* Restore previous IDR handle */
+               pr_debug("Restoring old IDR handle for the BO\n");
+               idr_handle = idr_alloc(&pdd->alloc_idr, mem,
+                                      bo_priv->idr_handle,
+                                      bo_priv->idr_handle + 1, GFP_KERNEL);
+
+               if (idr_handle < 0) {
+                       pr_err("Could not allocate idr\n");
+                       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev,
+                                               (struct kgd_mem *)mem,
+                                               pdd->drm_priv, NULL);
+                       ret = -ENOMEM;
+                       goto exit;
+               }
+
+               if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+                       bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL |
+                               KFD_MMAP_GPU_ID(pdd->dev->id);
+               } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+                       bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO |
+                               KFD_MMAP_GPU_ID(pdd->dev->id);
+               } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
+                       bo_bucket->restored_offset = offset;
+                       pr_debug("updating offset for GTT\n");
+               } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+                       bo_bucket->restored_offset = offset;
+                       /* Update the VRAM usage count */
+                       WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
+                       pr_debug("updating offset for VRAM\n");
+               }
+
+               /* now map these BOs to GPU/s */
+               for (j = 0; j < p->n_pdds; j++) {
+                       struct kfd_dev *peer;
+                       struct kfd_process_device *peer_pdd;
+                       bool table_freed = false;
+
+                       if (!bo_priv->mapped_gpuids[j])
+                               break;
+
+                       peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
+                       if (!peer_pdd) {
+                               ret = -EINVAL;
+                               goto exit;
+                       }
+                       peer = peer_pdd->dev;
+
+                       peer_pdd = kfd_bind_process_to_device(peer, p);
+                       if (IS_ERR(peer_pdd)) {
+                               ret = PTR_ERR(peer_pdd);
+                               goto exit;
+                       }
+                       pr_debug("map mem in restore ioctl -> 0x%llx\n",
+                                ((struct kgd_mem *)mem)->va);
+                       ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev,
+                               (struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed);
+                       if (ret) {
+                               pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
+                               goto exit;
+                       }
+                       if (table_freed)
+                               flush_tlbs = true;
+               }
+
+               ret = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
+                                                     (struct kgd_mem *) mem, true);
+               if (ret) {
+                       pr_debug("Sync memory failed, wait interrupted by user signal\n");
+                       goto exit;
+               }
+
+               pr_debug("map memory was successful for the BO\n");
+               /* create the dmabuf object and export the bo */
+               kgd_mem = (struct kgd_mem *)mem;
+               if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+                       ret = criu_get_prime_handle(&kgd_mem->bo->tbo.base,
+                                                   DRM_RDWR,
+                                                   &bo_bucket->dmabuf_fd);
+                       if (ret)
+                               goto exit;
+               }
+       } /* done */
+
+       if (flush_tlbs) {
+               /* Flush TLBs after waiting for the page table updates to complete */
+               for (j = 0; j < p->n_pdds; j++)
+                       kfd_flush_tlb(p->pdds[j], TLB_FLUSH_LEGACY);
+       }
+
+       /* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
+       ret = copy_to_user((void __user *)args->bos,
+                               bo_buckets,
+                               (args->num_bos * sizeof(*bo_buckets)));
+       if (ret)
+               ret = -EFAULT;
+
+exit:
+       while (ret && i--) {
+               if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+                       close_fd(bo_buckets[i].dmabuf_fd);
+       }
+       kvfree(bo_buckets);
+       kvfree(bo_privs);
+       return ret;
+}
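+
+/*
+ * restored_offset is reported back so the plugin can re-mmap each BO at its
+ * old virtual address using the new offset: doorbell and MMIO BOs get
+ * synthetic KFD_MMAP_TYPE_* offsets, while GTT and VRAM BOs report the
+ * offset returned by the allocator.
+ */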
+
+static int criu_restore_objects(struct file *filep,
+                               struct kfd_process *p,
+                               struct kfd_ioctl_criu_args *args,
+                               uint64_t *priv_offset,
+                               uint64_t max_priv_data_size)
+{
+       int ret = 0;
+       uint32_t i;
+
+       BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
+       BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
+       BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
+
+       for (i = 0; i < args->num_objects; i++) {
+               uint32_t object_type;
+
+               if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
+                       pr_err("Invalid private data size\n");
+                       return -EINVAL;
+               }
+
+               ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
+               if (ret) {
+                       pr_err("Failed to copy private information from user\n");
+                       goto exit;
+               }
+
+               switch (object_type) {
+               case KFD_CRIU_OBJECT_TYPE_QUEUE:
+                       ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
+                                                    priv_offset, max_priv_data_size);
+                       if (ret)
+                               goto exit;
+                       break;
+               case KFD_CRIU_OBJECT_TYPE_EVENT:
+                       ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
+                                                    priv_offset, max_priv_data_size);
+                       if (ret)
+                               goto exit;
+                       break;
+               case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
+                       ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
+                                                    priv_offset, max_priv_data_size);
+                       if (ret)
+                               goto exit;
+                       break;
+               default:
+                       pr_err("Invalid object type:%u at index:%d\n", object_type, i);
+                       ret = -EINVAL;
+                       goto exit;
+               }
+       }
+exit:
+       return ret;
+}
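+
+/*
+ * Queue, event and SVM-range records share one stream, so every
+ * kfd_criu_*_priv_data struct must place object_type at offset 0. The
+ * BUILD_BUG_ON()s above enforce that layout, which is what allows
+ * get_user() to peek at the type before dispatching to the matching
+ * restore helper.
+ */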
+
+static int criu_restore(struct file *filep,
+                       struct kfd_process *p,
+                       struct kfd_ioctl_criu_args *args)
+{
+       uint64_t priv_offset = 0;
+       int ret = 0;
+
+       pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
+                args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
+
+       if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
+           !args->num_devices || !args->num_bos)
+               return -EINVAL;
+
+       mutex_lock(&p->mutex);
+
+       /*
+        * Set the process to evicted state to avoid running any new queues before all the memory
+        * mappings are ready.
+        */
+       ret = kfd_process_evict_queues(p);
+       if (ret)
+               goto exit_unlock;
+
+       /* Each function will adjust priv_offset based on how many bytes they consumed */
+       ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
+       if (ret)
+               goto exit_unlock;
+
+       ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
+       if (ret)
+               goto exit_unlock;
+
+       ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
+       if (ret)
+               goto exit_unlock;
+
+       ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
+       if (ret)
+               goto exit_unlock;
+
+       if (priv_offset != args->priv_data_size) {
+               pr_err("Invalid private data size\n");
+               ret = -EINVAL;
+       }
+
+exit_unlock:
+       mutex_unlock(&p->mutex);
+       if (ret)
+               pr_err("Failed to restore CRIU ret:%d\n", ret);
+       else
+               pr_debug("CRIU restore successful\n");
+
+       return ret;
+}
+
+static int criu_unpause(struct file *filep,
+                       struct kfd_process *p,
+                       struct kfd_ioctl_criu_args *args)
+{
+       int ret;
+
+       mutex_lock(&p->mutex);
+
+       if (!p->queues_paused) {
+               mutex_unlock(&p->mutex);
+               return -EINVAL;
+       }
+
+       ret = kfd_process_restore_queues(p);
+       if (ret)
+               pr_err("Failed to unpause queues ret:%d\n", ret);
+       else
+               p->queues_paused = false;
+
+       mutex_unlock(&p->mutex);
+
+       return ret;
+}
+
+static int criu_resume(struct file *filep,
+                       struct kfd_process *p,
+                       struct kfd_ioctl_criu_args *args)
+{
+       struct kfd_process *target = NULL;
+       struct pid *pid = NULL;
+       int ret = 0;
+
+       pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
+                args->pid);
+
+       pid = find_get_pid(args->pid);
+       if (!pid) {
+               pr_err("Cannot find pid info for %i\n", args->pid);
+               return -ESRCH;
+       }
+
+       pr_debug("calling kfd_lookup_process_by_pid\n");
+       target = kfd_lookup_process_by_pid(pid);
+
+       put_pid(pid);
+
+       if (!target) {
+               pr_debug("Cannot find process info for %i\n", args->pid);
+               return -ESRCH;
+       }
+
+       mutex_lock(&target->mutex);
+       ret = kfd_criu_resume_svm(target);
+       if (ret) {
+               pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
+               goto exit;
+       }
+
+       ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
+       if (ret)
+               pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
+
+exit:
+       mutex_unlock(&target->mutex);
+
+       kfd_unref_process(target);
+       return ret;
+}
+
+static int criu_process_info(struct file *filep,
+                               struct kfd_process *p,
+                               struct kfd_ioctl_criu_args *args)
+{
+       int ret = 0;
+
+       mutex_lock(&p->mutex);
+
+       if (!p->n_pdds) {
+               pr_err("No pdd for given process\n");
+               ret = -ENODEV;
+               goto err_unlock;
+       }
+
+       ret = kfd_process_evict_queues(p);
+       if (ret)
+               goto err_unlock;
+
+       p->queues_paused = true;
+
+       args->pid = task_pid_nr_ns(p->lead_thread,
+                                       task_active_pid_ns(p->lead_thread));
+
+       ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
+                                          &args->num_objects, &args->priv_data_size);
+       if (ret)
+               goto err_unlock;
+
+       dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
+                               args->num_devices, args->num_bos, args->num_objects,
+                               args->priv_data_size);
+
+err_unlock:
+       if (ret) {
+               kfd_process_restore_queues(p);
+               p->queues_paused = false;
+       }
+       mutex_unlock(&p->mutex);
+       return ret;
+}
+
+static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
+{
+       struct kfd_ioctl_criu_args *args = data;
+       int ret;
+
+       dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
+       switch (args->op) {
+       case KFD_CRIU_OP_PROCESS_INFO:
+               ret = criu_process_info(filep, p, args);
+               break;
+       case KFD_CRIU_OP_CHECKPOINT:
+               ret = criu_checkpoint(filep, p, args);
+               break;
+       case KFD_CRIU_OP_UNPAUSE:
+               ret = criu_unpause(filep, p, args);
+               break;
+       case KFD_CRIU_OP_RESTORE:
+               ret = criu_restore(filep, p, args);
+               break;
+       case KFD_CRIU_OP_RESUME:
+               ret = criu_resume(filep, p, args);
+               break;
+       default:
+               dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
+               ret = -EINVAL;
+               break;
+       }
+
+       if (ret)
+               dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
+
+       return ret;
+}
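+
+/*
+ * Call sequence implied by the state checks above: a dump is PROCESS_INFO
+ * (evicts queues and sizes the buffers) -> CHECKPOINT -> UNPAUSE, while a
+ * restore is RESTORE in the new task followed by RESUME, which unblocks MMU
+ * notifications and SVM. Issuing CHECKPOINT without a prior PROCESS_INFO
+ * fails because queues_paused is still false.
+ */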
+
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+       [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
+                           .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
                        kfd_ioctl_get_version, 0),
 
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
@@ -1898,16 +2624,16 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
                        kfd_ioctl_wait_events, 0),
 
-       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
                        kfd_ioctl_dbg_register, 0),
 
-       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
                        kfd_ioctl_dbg_unregister, 0),
 
-       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
                        kfd_ioctl_dbg_address_watch, 0),
 
-       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
                        kfd_ioctl_dbg_wave_control, 0),
 
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
@@ -1959,6 +2685,10 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
 
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
                        kfd_ioctl_set_xnack_mode, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
+                       kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
 };
 
 #define AMDKFD_CORE_IOCTL_COUNT        ARRAY_SIZE(amdkfd_ioctls)
@@ -1973,6 +2703,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
        char *kdata = NULL;
        unsigned int usize, asize;
        int retcode = -EINVAL;
+       bool ptrace_attached = false;
 
        if (nr >= AMDKFD_CORE_IOCTL_COUNT)
                goto err_i1;
@@ -1998,7 +2729,15 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
         * processes need to create their own KFD device context.
         */
        process = filep->private_data;
-       if (process->lead_thread != current->group_leader) {
+
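+       /*
+        * CRIU checkpoints a process from the outside, so the
+        * checkpoint/restore ioctls are also accepted from a task that is
+        * ptrace-attached to the owner (the criu dumper), not only from the
+        * owning thread group.
+        */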
+       rcu_read_lock();
+       if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
+           ptrace_parent(process->lead_thread) == current)
+               ptrace_attached = true;
+       rcu_read_unlock();
+
+       if (process->lead_thread != current->group_leader
+           && !ptrace_attached) {
                dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
                retcode = -EBADF;
                goto err_i1;
@@ -2013,6 +2752,19 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
                goto err_i1;
        }
 
+       /*
+        * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
+        * CAP_CHECKPOINT_RESTORE, so we also allow access if the caller has
+        * CAP_SYS_ADMIN, which is a more privileged capability.
+        */
+       if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
+               if (!capable(CAP_CHECKPOINT_RESTORE) &&
+                                               !capable(CAP_SYS_ADMIN)) {
+                       retcode = -EACCES;
+                       goto err_i1;
+               }
+       }
+
        if (cmd & (IOC_IN | IOC_OUT)) {
                if (asize <= sizeof(stack_kdata)) {
                        kdata = stack_kdata;
index 9624bbe8b50138cfc73fa61527cddbd1116fb80c..bb6e49661d133eb10a58ca93404d36a8faa8b5c2 100644 (file)
@@ -1411,6 +1411,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
                case IP_VERSION(10, 1, 10):
                case IP_VERSION(10, 1, 2):
                case IP_VERSION(10, 1, 3):
+               case IP_VERSION(10, 1, 4):
                        pcache_info = navi10_cache_info;
                        num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
                        break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
deleted file mode 100644 (file)
index 1e30717..0000000
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/log2.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-
-#include "kfd_pm4_headers.h"
-#include "kfd_pm4_headers_diq.h"
-#include "kfd_kernel_queue.h"
-#include "kfd_priv.h"
-#include "kfd_pm4_opcodes.h"
-#include "cik_regs.h"
-#include "kfd_dbgmgr.h"
-#include "kfd_dbgdev.h"
-#include "kfd_device_queue_manager.h"
-
-static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
-{
-       dev->kfd2kgd->address_watch_disable(dev->adev);
-}
-
-static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
-                               u32 pasid, uint64_t vmid0_address,
-                               uint32_t *packet_buff, size_t size_in_bytes)
-{
-       struct pm4__release_mem *rm_packet;
-       struct pm4__indirect_buffer_pasid *ib_packet;
-       struct kfd_mem_obj *mem_obj;
-       size_t pq_packets_size_in_bytes;
-       union ULARGE_INTEGER *largep;
-       union ULARGE_INTEGER addr;
-       struct kernel_queue *kq;
-       uint64_t *rm_state;
-       unsigned int *ib_packet_buff;
-       int status;
-
-       if (WARN_ON(!size_in_bytes))
-               return -EINVAL;
-
-       kq = dbgdev->kq;
-
-       pq_packets_size_in_bytes = sizeof(struct pm4__release_mem) +
-                               sizeof(struct pm4__indirect_buffer_pasid);
-
-       /*
-        * We acquire a buffer from DIQ
-        * The receive packet buff will be sitting on the Indirect Buffer
-        * and in the PQ we put the IB packet + sync packet(s).
-        */
-       status = kq_acquire_packet_buffer(kq,
-                               pq_packets_size_in_bytes / sizeof(uint32_t),
-                               &ib_packet_buff);
-       if (status) {
-               pr_err("kq_acquire_packet_buffer failed\n");
-               return status;
-       }
-
-       memset(ib_packet_buff, 0, pq_packets_size_in_bytes);
-
-       ib_packet = (struct pm4__indirect_buffer_pasid *) (ib_packet_buff);
-
-       ib_packet->header.count = 3;
-       ib_packet->header.opcode = IT_INDIRECT_BUFFER_PASID;
-       ib_packet->header.type = PM4_TYPE_3;
-
-       largep = (union ULARGE_INTEGER *) &vmid0_address;
-
-       ib_packet->bitfields2.ib_base_lo = largep->u.low_part >> 2;
-       ib_packet->bitfields3.ib_base_hi = largep->u.high_part;
-
-       ib_packet->control = (1 << 23) | (1 << 31) |
-                       ((size_in_bytes / 4) & 0xfffff);
-
-       ib_packet->bitfields5.pasid = pasid;
-
-       /*
-        * for now we use release mem for GPU-CPU synchronization
-        * Consider WaitRegMem + WriteData as a better alternative
-        * we get a GART allocations ( gpu/cpu mapping),
-        * for the sync variable, and wait until:
-        * (a) Sync with HW
-        * (b) Sync var is written by CP to mem.
-        */
-       rm_packet = (struct pm4__release_mem *) (ib_packet_buff +
-                       (sizeof(struct pm4__indirect_buffer_pasid) /
-                                       sizeof(unsigned int)));
-
-       status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
-                                       &mem_obj);
-
-       if (status) {
-               pr_err("Failed to allocate GART memory\n");
-               kq_rollback_packet(kq);
-               return status;
-       }
-
-       rm_state = (uint64_t *) mem_obj->cpu_ptr;
-
-       *rm_state = QUEUESTATE__ACTIVE_COMPLETION_PENDING;
-
-       rm_packet->header.opcode = IT_RELEASE_MEM;
-       rm_packet->header.type = PM4_TYPE_3;
-       rm_packet->header.count = sizeof(struct pm4__release_mem) / 4 - 2;
-
-       rm_packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
-       rm_packet->bitfields2.event_index =
-                               event_index___release_mem__end_of_pipe;
-
-       rm_packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
-       rm_packet->bitfields2.atc = 0;
-       rm_packet->bitfields2.tc_wb_action_ena = 1;
-
-       addr.quad_part = mem_obj->gpu_addr;
-
-       rm_packet->bitfields4.address_lo_32b = addr.u.low_part >> 2;
-       rm_packet->address_hi = addr.u.high_part;
-
-       rm_packet->bitfields3.data_sel =
-                               data_sel___release_mem__send_64_bit_data;
-
-       rm_packet->bitfields3.int_sel =
-                       int_sel___release_mem__send_data_after_write_confirm;
-
-       rm_packet->bitfields3.dst_sel =
-                       dst_sel___release_mem__memory_controller;
-
-       rm_packet->data_lo = QUEUESTATE__ACTIVE;
-
-       kq_submit_packet(kq);
-
-       /* Wait till CP writes sync code: */
-       status = amdkfd_fence_wait_timeout(
-                       rm_state,
-                       QUEUESTATE__ACTIVE, 1500);
-
-       kfd_gtt_sa_free(dbgdev->dev, mem_obj);
-
-       return status;
-}
-
-static int dbgdev_register_nodiq(struct kfd_dbgdev *dbgdev)
-{
-       /*
-        * no action is needed in this case,
-        * just make sure diq will not be used
-        */
-
-       dbgdev->kq = NULL;
-
-       return 0;
-}
-
-static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
-{
-       struct queue_properties properties;
-       unsigned int qid;
-       struct kernel_queue *kq = NULL;
-       int status;
-
-       properties.type = KFD_QUEUE_TYPE_DIQ;
-
-       status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
-                               &properties, &qid, NULL);
-
-       if (status) {
-               pr_err("Failed to create DIQ\n");
-               return status;
-       }
-
-       pr_debug("DIQ Created with queue id: %d\n", qid);
-
-       kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
-
-       if (!kq) {
-               pr_err("Error getting DIQ\n");
-               pqm_destroy_queue(dbgdev->pqm, qid);
-               return -EFAULT;
-       }
-
-       dbgdev->kq = kq;
-
-       return status;
-}
-
-static int dbgdev_unregister_nodiq(struct kfd_dbgdev *dbgdev)
-{
-       /* disable watch address */
-       dbgdev_address_watch_disable_nodiq(dbgdev->dev);
-       return 0;
-}
-
-static int dbgdev_unregister_diq(struct kfd_dbgdev *dbgdev)
-{
-       /* todo - disable address watch */
-       int status;
-
-       status = pqm_destroy_queue(dbgdev->pqm,
-                       dbgdev->kq->queue->properties.queue_id);
-       dbgdev->kq = NULL;
-
-       return status;
-}
-
-static void dbgdev_address_watch_set_registers(
-                       const struct dbg_address_watch_info *adw_info,
-                       union TCP_WATCH_ADDR_H_BITS *addrHi,
-                       union TCP_WATCH_ADDR_L_BITS *addrLo,
-                       union TCP_WATCH_CNTL_BITS *cntl,
-                       unsigned int index, unsigned int vmid)
-{
-       union ULARGE_INTEGER addr;
-
-       addr.quad_part = 0;
-       addrHi->u32All = 0;
-       addrLo->u32All = 0;
-       cntl->u32All = 0;
-
-       if (adw_info->watch_mask)
-               cntl->bitfields.mask =
-                       (uint32_t) (adw_info->watch_mask[index] &
-                                       ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
-       else
-               cntl->bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
-
-       addr.quad_part = (unsigned long long) adw_info->watch_address[index];
-
-       addrHi->bitfields.addr = addr.u.high_part &
-                                       ADDRESS_WATCH_REG_ADDHIGH_MASK;
-       addrLo->bitfields.addr =
-                       (addr.u.low_part >> ADDRESS_WATCH_REG_ADDLOW_SHIFT);
-
-       cntl->bitfields.mode = adw_info->watch_mode[index];
-       cntl->bitfields.vmid = (uint32_t) vmid;
-       /* for now assume it is an ATC address */
-       cntl->u32All |= ADDRESS_WATCH_REG_CNTL_ATC_BIT;
-
-       pr_debug("\t\t%20s %08x\n", "set reg mask :", cntl->bitfields.mask);
-       pr_debug("\t\t%20s %08x\n", "set reg add high :",
-                       addrHi->bitfields.addr);
-       pr_debug("\t\t%20s %08x\n", "set reg add low :",
-                       addrLo->bitfields.addr);
-}
-
-static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
-                                     struct dbg_address_watch_info *adw_info)
-{
-       union TCP_WATCH_ADDR_H_BITS addrHi;
-       union TCP_WATCH_ADDR_L_BITS addrLo;
-       union TCP_WATCH_CNTL_BITS cntl;
-       struct kfd_process_device *pdd;
-       unsigned int i;
-
-       /* Take the VMID for that process the safe way, via its pdd */
-       pdd = kfd_get_process_device_data(dbgdev->dev,
-                                       adw_info->process);
-       if (!pdd) {
-               pr_err("Failed to get pdd for address watch without DIQ\n");
-               return -EFAULT;
-       }
-
-       addrHi.u32All = 0;
-       addrLo.u32All = 0;
-       cntl.u32All = 0;
-
-       if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
-                       (adw_info->num_watch_points == 0)) {
-               pr_err("num_watch_points is invalid\n");
-               return -EINVAL;
-       }
-
-       if (!adw_info->watch_mode || !adw_info->watch_address) {
-               pr_err("adw_info fields are not valid\n");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < adw_info->num_watch_points; i++) {
-               dbgdev_address_watch_set_registers(adw_info, &addrHi, &addrLo,
-                                               &cntl, i, pdd->qpd.vmid);
-
-               pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
-               pr_debug("\t\t%20s %08x\n", "register index :", i);
-               pr_debug("\t\t%20s %08x\n", "vmid is :", pdd->qpd.vmid);
-               pr_debug("\t\t%20s %08x\n", "Address Low is :",
-                               addrLo.bitfields.addr);
-               pr_debug("\t\t%20s %08x\n", "Address high is :",
-                               addrHi.bitfields.addr);
-               pr_debug("\t\t%20s %08x\n", "Control Mask is :",
-                               cntl.bitfields.mask);
-               pr_debug("\t\t%20s %08x\n", "Control Mode is :",
-                               cntl.bitfields.mode);
-               pr_debug("\t\t%20s %08x\n", "Control Vmid is :",
-                               cntl.bitfields.vmid);
-               pr_debug("\t\t%20s %08x\n", "Control atc  is :",
-                               cntl.bitfields.atc);
-               pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
-
-               pdd->dev->kfd2kgd->address_watch_execute(
-                                               dbgdev->dev->adev,
-                                               i,
-                                               cntl.u32All,
-                                               addrHi.u32All,
-                                               addrLo.u32All);
-       }
-
-       return 0;
-}
-
-static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
-                                   struct dbg_address_watch_info *adw_info)
-{
-       struct pm4__set_config_reg *packets_vec;
-       union TCP_WATCH_ADDR_H_BITS addrHi;
-       union TCP_WATCH_ADDR_L_BITS addrLo;
-       union TCP_WATCH_CNTL_BITS cntl;
-       struct kfd_mem_obj *mem_obj;
-       unsigned int aw_reg_add_dword;
-       uint32_t *packet_buff_uint;
-       unsigned int i;
-       int status;
-       size_t ib_size = sizeof(struct pm4__set_config_reg) * 4;
-       /* we do not control the vmid in DIQ mode, just a placeholder */
-       unsigned int vmid = 0;
-
-       addrHi.u32All = 0;
-       addrLo.u32All = 0;
-       cntl.u32All = 0;
-
-       if ((adw_info->num_watch_points > MAX_WATCH_ADDRESSES) ||
-                       (adw_info->num_watch_points == 0)) {
-               pr_err("num_watch_points is invalid\n");
-               return -EINVAL;
-       }
-
-       if (!adw_info->watch_mode || !adw_info->watch_address) {
-               pr_err("adw_info fields are not valid\n");
-               return -EINVAL;
-       }
-
-       status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
-
-       if (status) {
-               pr_err("Failed to allocate GART memory\n");
-               return status;
-       }
-
-       packet_buff_uint = mem_obj->cpu_ptr;
-
-       memset(packet_buff_uint, 0, ib_size);
-
-       packets_vec = (struct pm4__set_config_reg *) (packet_buff_uint);
-
-       packets_vec[0].header.count = 1;
-       packets_vec[0].header.opcode = IT_SET_CONFIG_REG;
-       packets_vec[0].header.type = PM4_TYPE_3;
-       packets_vec[0].bitfields2.vmid_shift = ADDRESS_WATCH_CNTL_OFFSET;
-       packets_vec[0].bitfields2.insert_vmid = 1;
-       packets_vec[1].ordinal1 = packets_vec[0].ordinal1;
-       packets_vec[1].bitfields2.insert_vmid = 0;
-       packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
-       packets_vec[2].bitfields2.insert_vmid = 0;
-       packets_vec[3].ordinal1 = packets_vec[0].ordinal1;
-       packets_vec[3].bitfields2.vmid_shift = ADDRESS_WATCH_CNTL_OFFSET;
-       packets_vec[3].bitfields2.insert_vmid = 1;
-
-       for (i = 0; i < adw_info->num_watch_points; i++) {
-               dbgdev_address_watch_set_registers(adw_info,
-                                               &addrHi,
-                                               &addrLo,
-                                               &cntl,
-                                               i,
-                                               vmid);
-
-               pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
-               pr_debug("\t\t%20s %08x\n", "register index :", i);
-               pr_debug("\t\t%20s %08x\n", "vmid is :", vmid);
-               pr_debug("\t\t%20s %p\n", "Add ptr is :",
-                               adw_info->watch_address);
-               pr_debug("\t\t%20s %08llx\n", "Add     is :",
-                               adw_info->watch_address[i]);
-               pr_debug("\t\t%20s %08x\n", "Address Low is :",
-                               addrLo.bitfields.addr);
-               pr_debug("\t\t%20s %08x\n", "Address high is :",
-                               addrHi.bitfields.addr);
-               pr_debug("\t\t%20s %08x\n", "Control Mask is :",
-                               cntl.bitfields.mask);
-               pr_debug("\t\t%20s %08x\n", "Control Mode is :",
-                               cntl.bitfields.mode);
-               pr_debug("\t\t%20s %08x\n", "Control Vmid is :",
-                               cntl.bitfields.vmid);
-               pr_debug("\t\t%20s %08x\n", "Control atc  is :",
-                               cntl.bitfields.atc);
-               pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *");
-
-               aw_reg_add_dword =
-                               dbgdev->dev->kfd2kgd->address_watch_get_offset(
-                                       dbgdev->dev->adev,
-                                       i,
-                                       ADDRESS_WATCH_REG_CNTL);
-
-               packets_vec[0].bitfields2.reg_offset =
-                                       aw_reg_add_dword - AMD_CONFIG_REG_BASE;
-
-               packets_vec[0].reg_data[0] = cntl.u32All;
-
-               aw_reg_add_dword =
-                               dbgdev->dev->kfd2kgd->address_watch_get_offset(
-                                       dbgdev->dev->adev,
-                                       i,
-                                       ADDRESS_WATCH_REG_ADDR_HI);
-
-               packets_vec[1].bitfields2.reg_offset =
-                                       aw_reg_add_dword - AMD_CONFIG_REG_BASE;
-               packets_vec[1].reg_data[0] = addrHi.u32All;
-
-               aw_reg_add_dword =
-                               dbgdev->dev->kfd2kgd->address_watch_get_offset(
-                                       dbgdev->dev->adev,
-                                       i,
-                                       ADDRESS_WATCH_REG_ADDR_LO);
-
-               packets_vec[2].bitfields2.reg_offset =
-                               aw_reg_add_dword - AMD_CONFIG_REG_BASE;
-               packets_vec[2].reg_data[0] = addrLo.u32All;
-
-               /* enable the watch valid flag if the address is non-zero */
-               if (adw_info->watch_address[i] > 0)
-                       cntl.bitfields.valid = 1;
-               else
-                       cntl.bitfields.valid = 0;
-
-               aw_reg_add_dword =
-                               dbgdev->dev->kfd2kgd->address_watch_get_offset(
-                                       dbgdev->dev->adev,
-                                       i,
-                                       ADDRESS_WATCH_REG_CNTL);
-
-               packets_vec[3].bitfields2.reg_offset =
-                                       aw_reg_add_dword - AMD_CONFIG_REG_BASE;
-               packets_vec[3].reg_data[0] = cntl.u32All;
-
-               status = dbgdev_diq_submit_ib(
-                                       dbgdev,
-                                       adw_info->process->pasid,
-                                       mem_obj->gpu_addr,
-                                       packet_buff_uint,
-                                       ib_size);
-
-               if (status) {
-                       pr_err("Failed to submit IB to DIQ\n");
-                       break;
-               }
-       }
-
-       kfd_gtt_sa_free(dbgdev->dev, mem_obj);
-       return status;
-}
-
-static int dbgdev_wave_control_set_registers(
-                               struct dbg_wave_control_info *wac_info,
-                               union SQ_CMD_BITS *in_reg_sq_cmd,
-                               union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
-{
-       int status = 0;
-       union SQ_CMD_BITS reg_sq_cmd;
-       union GRBM_GFX_INDEX_BITS reg_gfx_index;
-       struct HsaDbgWaveMsgAMDGen2 *pMsg;
-
-       reg_sq_cmd.u32All = 0;
-       reg_gfx_index.u32All = 0;
-       pMsg = &wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2;
-
-       switch (wac_info->mode) {
-       /* Send command to single wave */
-       case HSA_DBG_WAVEMODE_SINGLE:
-               /*
-                * Limit access to the process waves only,
-                * by setting vmid check
-                */
-               reg_sq_cmd.bits.check_vmid = 1;
-               reg_sq_cmd.bits.simd_id = pMsg->ui32.SIMD;
-               reg_sq_cmd.bits.wave_id = pMsg->ui32.WaveId;
-               reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_SINGLE;
-
-               reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray;
-               reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine;
-               reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU;
-
-               break;
-
-       /* Send command to all waves with matching VMID */
-       case HSA_DBG_WAVEMODE_BROADCAST_PROCESS:
-
-               reg_gfx_index.bits.sh_broadcast_writes = 1;
-               reg_gfx_index.bits.se_broadcast_writes = 1;
-               reg_gfx_index.bits.instance_broadcast_writes = 1;
-
-               reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
-
-               break;
-
-       /* Send command to all CU waves with matching VMID */
-       case HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU:
-
-               reg_sq_cmd.bits.check_vmid = 1;
-               reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
-
-               reg_gfx_index.bits.sh_index = pMsg->ui32.ShaderArray;
-               reg_gfx_index.bits.se_index = pMsg->ui32.ShaderEngine;
-               reg_gfx_index.bits.instance_index = pMsg->ui32.HSACU;
-
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       switch (wac_info->operand) {
-       case HSA_DBG_WAVEOP_HALT:
-               reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_HALT;
-               break;
-
-       case HSA_DBG_WAVEOP_RESUME:
-               reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_RESUME;
-               break;
-
-       case HSA_DBG_WAVEOP_KILL:
-               reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
-               break;
-
-       case HSA_DBG_WAVEOP_DEBUG:
-               reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_DEBUG;
-               break;
-
-       case HSA_DBG_WAVEOP_TRAP:
-               if (wac_info->trapId < MAX_TRAPID) {
-                       reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_TRAP;
-                       reg_sq_cmd.bits.trap_id = wac_info->trapId;
-               } else {
-                       status = -EINVAL;
-               }
-               break;
-
-       default:
-               status = -EINVAL;
-               break;
-       }
-
-       if (status == 0) {
-               *in_reg_sq_cmd = reg_sq_cmd;
-               *in_reg_gfx_index = reg_gfx_index;
-       }
-
-       return status;
-}
-
-static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
-                                       struct dbg_wave_control_info *wac_info)
-{
-
-       int status;
-       union SQ_CMD_BITS reg_sq_cmd;
-       union GRBM_GFX_INDEX_BITS reg_gfx_index;
-       struct kfd_mem_obj *mem_obj;
-       uint32_t *packet_buff_uint;
-       struct pm4__set_config_reg *packets_vec;
-       size_t ib_size = sizeof(struct pm4__set_config_reg) * 3;
-
-       reg_sq_cmd.u32All = 0;
-
-       status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
-                                                       &reg_gfx_index);
-       if (status) {
-               pr_err("Failed to set wave control registers\n");
-               return status;
-       }
-
-       /* we do not control the VMID in DIQ, so reset it to a known value */
-       reg_sq_cmd.bits.vm_id = 0;
-
-       pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
-
-       pr_debug("\t\t mode      is: %u\n", wac_info->mode);
-       pr_debug("\t\t operand   is: %u\n", wac_info->operand);
-       pr_debug("\t\t trap id   is: %u\n", wac_info->trapId);
-       pr_debug("\t\t msg value is: %u\n",
-                       wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
-       pr_debug("\t\t vmid      is: N/A\n");
-
-       pr_debug("\t\t chk_vmid  is : %u\n", reg_sq_cmd.bitfields.check_vmid);
-       pr_debug("\t\t command   is : %u\n", reg_sq_cmd.bitfields.cmd);
-       pr_debug("\t\t queue id  is : %u\n", reg_sq_cmd.bitfields.queue_id);
-       pr_debug("\t\t simd id   is : %u\n", reg_sq_cmd.bitfields.simd_id);
-       pr_debug("\t\t mode      is : %u\n", reg_sq_cmd.bitfields.mode);
-       pr_debug("\t\t vm_id     is : %u\n", reg_sq_cmd.bitfields.vm_id);
-       pr_debug("\t\t wave_id   is : %u\n", reg_sq_cmd.bitfields.wave_id);
-
-       pr_debug("\t\t ibw       is : %u\n",
-                       reg_gfx_index.bitfields.instance_broadcast_writes);
-       pr_debug("\t\t ii        is : %u\n",
-                       reg_gfx_index.bitfields.instance_index);
-       pr_debug("\t\t sebw      is : %u\n",
-                       reg_gfx_index.bitfields.se_broadcast_writes);
-       pr_debug("\t\t se_ind    is : %u\n", reg_gfx_index.bitfields.se_index);
-       pr_debug("\t\t sh_ind    is : %u\n", reg_gfx_index.bitfields.sh_index);
-       pr_debug("\t\t sbw       is : %u\n",
-                       reg_gfx_index.bitfields.sh_broadcast_writes);
-
-       pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
-
-       status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
-
-       if (status != 0) {
-               pr_err("Failed to allocate GART memory\n");
-               return status;
-       }
-
-       packet_buff_uint = mem_obj->cpu_ptr;
-
-       memset(packet_buff_uint, 0, ib_size);
-
-       packets_vec =  (struct pm4__set_config_reg *) packet_buff_uint;
-       packets_vec[0].header.count = 1;
-       packets_vec[0].header.opcode = IT_SET_UCONFIG_REG;
-       packets_vec[0].header.type = PM4_TYPE_3;
-       packets_vec[0].bitfields2.reg_offset =
-                       GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE;
-
-       packets_vec[0].bitfields2.insert_vmid = 0;
-       packets_vec[0].reg_data[0] = reg_gfx_index.u32All;
-
-       packets_vec[1].header.count = 1;
-       packets_vec[1].header.opcode = IT_SET_CONFIG_REG;
-       packets_vec[1].header.type = PM4_TYPE_3;
-       packets_vec[1].bitfields2.reg_offset = SQ_CMD / 4 - AMD_CONFIG_REG_BASE;
-
-       packets_vec[1].bitfields2.vmid_shift = SQ_CMD_VMID_OFFSET;
-       packets_vec[1].bitfields2.insert_vmid = 1;
-       packets_vec[1].reg_data[0] = reg_sq_cmd.u32All;
-
-       /* Restore the GRBM_GFX_INDEX register */
-
-       reg_gfx_index.u32All = 0;
-       reg_gfx_index.bits.sh_broadcast_writes = 1;
-       reg_gfx_index.bits.instance_broadcast_writes = 1;
-       reg_gfx_index.bits.se_broadcast_writes = 1;
-
-       packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
-       packets_vec[2].bitfields2.reg_offset =
-                               GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE;
-
-       packets_vec[2].bitfields2.insert_vmid = 0;
-       packets_vec[2].reg_data[0] = reg_gfx_index.u32All;
-
-       status = dbgdev_diq_submit_ib(
-                       dbgdev,
-                       wac_info->process->pasid,
-                       mem_obj->gpu_addr,
-                       packet_buff_uint,
-                       ib_size);
-
-       if (status)
-               pr_err("Failed to submit IB to DIQ\n");
-
-       kfd_gtt_sa_free(dbgdev->dev, mem_obj);
-
-       return status;
-}
-
-static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev,
-                                       struct dbg_wave_control_info *wac_info)
-{
-       int status;
-       union SQ_CMD_BITS reg_sq_cmd;
-       union GRBM_GFX_INDEX_BITS reg_gfx_index;
-       struct kfd_process_device *pdd;
-
-       reg_sq_cmd.u32All = 0;
-
-       /* Take the VMID for that process the safe way, via its PDD */
-       pdd = kfd_get_process_device_data(dbgdev->dev, wac_info->process);
-
-       if (!pdd) {
-               pr_err("Failed to get pdd for wave control no DIQ\n");
-               return -EFAULT;
-       }
-       status = dbgdev_wave_control_set_registers(wac_info, &reg_sq_cmd,
-                                                       &reg_gfx_index);
-       if (status) {
-               pr_err("Failed to set wave control registers\n");
-               return status;
-       }
-
-       /* For non-DIQ we need to patch the VMID: */
-
-       reg_sq_cmd.bits.vm_id = pdd->qpd.vmid;
-
-       pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
-
-       pr_debug("\t\t mode      is: %u\n", wac_info->mode);
-       pr_debug("\t\t operand   is: %u\n", wac_info->operand);
-       pr_debug("\t\t trap id   is: %u\n", wac_info->trapId);
-       pr_debug("\t\t msg value is: %u\n",
-                       wac_info->dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);
-       pr_debug("\t\t vmid      is: %u\n", pdd->qpd.vmid);
-
-       pr_debug("\t\t chk_vmid  is : %u\n", reg_sq_cmd.bitfields.check_vmid);
-       pr_debug("\t\t command   is : %u\n", reg_sq_cmd.bitfields.cmd);
-       pr_debug("\t\t queue id  is : %u\n", reg_sq_cmd.bitfields.queue_id);
-       pr_debug("\t\t simd id   is : %u\n", reg_sq_cmd.bitfields.simd_id);
-       pr_debug("\t\t mode      is : %u\n", reg_sq_cmd.bitfields.mode);
-       pr_debug("\t\t vm_id     is : %u\n", reg_sq_cmd.bitfields.vm_id);
-       pr_debug("\t\t wave_id   is : %u\n", reg_sq_cmd.bitfields.wave_id);
-
-       pr_debug("\t\t ibw       is : %u\n",
-                       reg_gfx_index.bitfields.instance_broadcast_writes);
-       pr_debug("\t\t ii        is : %u\n",
-                       reg_gfx_index.bitfields.instance_index);
-       pr_debug("\t\t sebw      is : %u\n",
-                       reg_gfx_index.bitfields.se_broadcast_writes);
-       pr_debug("\t\t se_ind    is : %u\n", reg_gfx_index.bitfields.se_index);
-       pr_debug("\t\t sh_ind    is : %u\n", reg_gfx_index.bitfields.sh_index);
-       pr_debug("\t\t sbw       is : %u\n",
-                       reg_gfx_index.bitfields.sh_broadcast_writes);
-
-       pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *");
-
-       return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev,
-                                                       reg_gfx_index.u32All,
-                                                       reg_sq_cmd.u32All);
-}
-
-int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
-{
-       int status = 0;
-       unsigned int vmid;
-       uint16_t queried_pasid;
-       union SQ_CMD_BITS reg_sq_cmd;
-       union GRBM_GFX_INDEX_BITS reg_gfx_index;
-       struct kfd_process_device *pdd;
-       struct dbg_wave_control_info wac_info;
-       int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
-       int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
-
-       reg_sq_cmd.u32All = 0;
-       status = 0;
-
-       wac_info.mode = HSA_DBG_WAVEMODE_BROADCAST_PROCESS;
-       wac_info.operand = HSA_DBG_WAVEOP_KILL;
-
-       pr_debug("Killing all process wavefronts\n");
-
-       /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
-        * ATC_VMID15_PASID_MAPPING
-        * to check which VMID the current process is mapped to.
-        */
-
-       for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
-               status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
-                               (dev->adev, vmid, &queried_pasid);
-
-               if (status && queried_pasid == p->pasid) {
-                       pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
-                                       vmid, p->pasid);
-                       break;
-               }
-       }
-
-       if (vmid > last_vmid_to_scan) {
-               pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
-               return -EFAULT;
-       }
-
-       /* Take the VMID for that process the safe way, via its PDD */
-       pdd = kfd_get_process_device_data(dev, p);
-       if (!pdd)
-               return -EFAULT;
-
-       status = dbgdev_wave_control_set_registers(&wac_info, &reg_sq_cmd,
-                       &reg_gfx_index);
-       if (status != 0)
-               return -EINVAL;
-
-       /* For non-DIQ we need to patch the VMID: */
-       reg_sq_cmd.bits.vm_id = vmid;
-
-       dev->kfd2kgd->wave_control_execute(dev->adev,
-                                       reg_gfx_index.u32All,
-                                       reg_sq_cmd.u32All);
-
-       return 0;
-}
-
-void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
-                       enum DBGDEV_TYPE type)
-{
-       pdbgdev->dev = pdev;
-       pdbgdev->kq = NULL;
-       pdbgdev->type = type;
-       pdbgdev->pqm = NULL;
-
-       switch (type) {
-       case DBGDEV_TYPE_NODIQ:
-               pdbgdev->dbgdev_register = dbgdev_register_nodiq;
-               pdbgdev->dbgdev_unregister = dbgdev_unregister_nodiq;
-               pdbgdev->dbgdev_wave_control = dbgdev_wave_control_nodiq;
-               pdbgdev->dbgdev_address_watch = dbgdev_address_watch_nodiq;
-               break;
-       case DBGDEV_TYPE_DIQ:
-       default:
-               pdbgdev->dbgdev_register = dbgdev_register_diq;
-               pdbgdev->dbgdev_unregister = dbgdev_unregister_diq;
-               pdbgdev->dbgdev_wave_control =  dbgdev_wave_control_diq;
-               pdbgdev->dbgdev_address_watch = dbgdev_address_watch_diq;
-               break;
-       }
-
-}
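
For reference, the kfd_dbgdev_init() removed above is the usual ops-table scheme: the
behaviour set is chosen once, at init time, by filling function pointers, so callers
never branch on DIQ vs. no-DIQ again. A stand-alone sketch of the same idea (all names
and types here are illustrative, not the kernel's):

#include <stdio.h>

struct dbg_ops {
        int (*reg)(void);
        int (*unreg)(void);
};

static int reg_nodiq(void)   { puts("register: no DIQ");   return 0; }
static int unreg_nodiq(void) { puts("unregister: no DIQ"); return 0; }
static int reg_diq(void)     { puts("register: DIQ");      return 0; }
static int unreg_diq(void)   { puts("unregister: DIQ");    return 0; }

static void dbg_ops_init(struct dbg_ops *ops, int use_diq)
{
        /* select the whole behaviour set once, up front */
        ops->reg   = use_diq ? reg_diq   : reg_nodiq;
        ops->unreg = use_diq ? unreg_diq : unreg_nodiq;
}

int main(void)
{
        struct dbg_ops ops;

        dbg_ops_init(&ops, 1);
        ops.reg();      /* callers just invoke; no type checks needed */
        return ops.unreg();
}
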
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
deleted file mode 100644
index 0619c77..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef KFD_DBGDEV_H_
-#define KFD_DBGDEV_H_
-
-enum {
-       SQ_CMD_VMID_OFFSET = 28,
-       ADDRESS_WATCH_CNTL_OFFSET = 24
-};
-
-enum {
-       PRIV_QUEUE_SYNC_TIME_MS = 200
-};
-
-/* CONTEXT reg space definition */
-enum {
-       CONTEXT_REG_BASE = 0xA000,
-       CONTEXT_REG_END = 0xA400,
-       CONTEXT_REG_SIZE = CONTEXT_REG_END - CONTEXT_REG_BASE
-};
-
-/* USER CONFIG reg space definition */
-enum {
-       USERCONFIG_REG_BASE = 0xC000,
-       USERCONFIG_REG_END = 0x10000,
-       USERCONFIG_REG_SIZE = USERCONFIG_REG_END - USERCONFIG_REG_BASE
-};
-
-/* CONFIG reg space definition */
-enum {
-       AMD_CONFIG_REG_BASE = 0x2000,   /* in dwords */
-       AMD_CONFIG_REG_END = 0x2B00,
-       AMD_CONFIG_REG_SIZE = AMD_CONFIG_REG_END - AMD_CONFIG_REG_BASE
-};
-
-/* SH reg space definition */
-enum {
-       SH_REG_BASE = 0x2C00,
-       SH_REG_END = 0x3000,
-       SH_REG_SIZE = SH_REG_END - SH_REG_BASE
-};
-
-/* SQ_CMD definitions */
-#define SQ_CMD                                         0x8DEC
-
-enum SQ_IND_CMD_CMD {
-       SQ_IND_CMD_CMD_NULL = 0x00000000,
-       SQ_IND_CMD_CMD_HALT = 0x00000001,
-       SQ_IND_CMD_CMD_RESUME = 0x00000002,
-       SQ_IND_CMD_CMD_KILL = 0x00000003,
-       SQ_IND_CMD_CMD_DEBUG = 0x00000004,
-       SQ_IND_CMD_CMD_TRAP = 0x00000005,
-};
-
-enum SQ_IND_CMD_MODE {
-       SQ_IND_CMD_MODE_SINGLE = 0x00000000,
-       SQ_IND_CMD_MODE_BROADCAST = 0x00000001,
-       SQ_IND_CMD_MODE_BROADCAST_QUEUE = 0x00000002,
-       SQ_IND_CMD_MODE_BROADCAST_PIPE = 0x00000003,
-       SQ_IND_CMD_MODE_BROADCAST_ME = 0x00000004,
-};
-
-union SQ_IND_INDEX_BITS {
-       struct {
-               uint32_t wave_id:4;
-               uint32_t simd_id:2;
-               uint32_t thread_id:6;
-                uint32_t:1;
-               uint32_t force_read:1;
-               uint32_t read_timeout:1;
-               uint32_t unindexed:1;
-               uint32_t index:16;
-
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-union SQ_IND_CMD_BITS {
-       struct {
-               uint32_t data:32;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-union SQ_CMD_BITS {
-       struct {
-               uint32_t cmd:3;
-                uint32_t:1;
-               uint32_t mode:3;
-               uint32_t check_vmid:1;
-               uint32_t trap_id:3;
-                uint32_t:5;
-               uint32_t wave_id:4;
-               uint32_t simd_id:2;
-                uint32_t:2;
-               uint32_t queue_id:3;
-                uint32_t:1;
-               uint32_t vm_id:4;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-union SQ_IND_DATA_BITS {
-       struct {
-               uint32_t data:32;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-union GRBM_GFX_INDEX_BITS {
-       struct {
-               uint32_t instance_index:8;
-               uint32_t sh_index:8;
-               uint32_t se_index:8;
-                uint32_t:5;
-               uint32_t sh_broadcast_writes:1;
-               uint32_t instance_broadcast_writes:1;
-               uint32_t se_broadcast_writes:1;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-union TCP_WATCH_ADDR_H_BITS {
-       struct {
-               uint32_t addr:16;
-                uint32_t:16;
-
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-union TCP_WATCH_ADDR_L_BITS {
-       struct {
-               uint32_t:6;
-               uint32_t addr:26;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-enum {
-       QUEUESTATE__INVALID = 0, /* so by default we'll get invalid state */
-       QUEUESTATE__ACTIVE_COMPLETION_PENDING,
-       QUEUESTATE__ACTIVE
-};
-
-union ULARGE_INTEGER {
-       struct {
-               uint32_t low_part;
-               uint32_t high_part;
-       } u;
-       unsigned long long quad_part;
-};
-
-
-#define KFD_CIK_VMID_START_OFFSET (8)
-#define KFD_CIK_VMID_END_OFFSET (KFD_CIK_VMID_START_OFFSET + (8))
-
-
-void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
-                       enum DBGDEV_TYPE type);
-
-union TCP_WATCH_CNTL_BITS {
-       struct {
-               uint32_t mask:24;
-               uint32_t vmid:4;
-               uint32_t atc:1;
-               uint32_t mode:2;
-               uint32_t valid:1;
-       } bitfields, bits;
-       uint32_t u32All;
-       signed int i32All;
-       float f32All;
-};
-
-enum {
-       ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
-       ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
-       ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
-       /* extend the mask to 26 bits in order to match the low address field */
-       ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
-       ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
-};
-
-enum {
-       MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
-       MAX_WATCH_ADDRESSES = 4
-};
-
-enum {
-       ADDRESS_WATCH_REG_ADDR_HI = 0,
-       ADDRESS_WATCH_REG_ADDR_LO,
-       ADDRESS_WATCH_REG_CNTL,
-       ADDRESS_WATCH_REG_MAX
-};
-
-#endif /* KFD_DBGDEV_H_ */
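
The TCP_WATCH_* encodings above split a watch address across two registers: ADDR_H
carries bits [47:32] (masked to 16 bits) and ADDR_L carries bits [31:6], shifted down
by ADDRESS_WATCH_REG_ADDLOW_SHIFT, with the low 6 bits covered by the byte mask. A
quick stand-alone check of that split, using an illustrative address value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t watch_address = 0x0000123456789abcULL;  /* example only */
        uint32_t low_part  = (uint32_t)watch_address;
        uint32_t high_part = (uint32_t)(watch_address >> 32);

        uint32_t addr_hi = high_part & 0xFFFF;  /* ADDRESS_WATCH_REG_ADDHIGH_MASK */
        uint32_t addr_lo = low_part >> 6;       /* ADDRESS_WATCH_REG_ADDLOW_SHIFT */

        printf("ADDR_H=%04x ADDR_L=%07x\n", addr_hi, addr_lo);
        return 0;
}
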
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
deleted file mode 100644
index 9bfa506..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/log2.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-
-#include "kfd_priv.h"
-#include "cik_regs.h"
-#include "kfd_pm4_headers.h"
-#include "kfd_pm4_headers_diq.h"
-#include "kfd_dbgmgr.h"
-#include "kfd_dbgdev.h"
-#include "kfd_device_queue_manager.h"
-
-static DEFINE_MUTEX(kfd_dbgmgr_mutex);
-
-struct mutex *kfd_get_dbgmgr_mutex(void)
-{
-       return &kfd_dbgmgr_mutex;
-}
-
-
-static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
-{
-       kfree(pmgr->dbgdev);
-
-       pmgr->dbgdev = NULL;
-       pmgr->pasid = 0;
-       pmgr->dev = NULL;
-}
-
-void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
-{
-       if (pmgr) {
-               kfd_dbgmgr_uninitialize(pmgr);
-               kfree(pmgr);
-       }
-}
-
-bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
-{
-       enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
-       struct kfd_dbgmgr *new_buff;
-
-       if (WARN_ON(!pdev->init_complete))
-               return false;
-
-       new_buff = kfd_alloc_struct(new_buff);
-       if (!new_buff) {
-               pr_err("Failed to allocate dbgmgr instance\n");
-               return false;
-       }
-
-       new_buff->pasid = 0;
-       new_buff->dev = pdev;
-       new_buff->dbgdev = kfd_alloc_struct(new_buff->dbgdev);
-       if (!new_buff->dbgdev) {
-               pr_err("Failed to allocate dbgdev instance\n");
-               kfree(new_buff);
-               return false;
-       }
-
-       /* Get the actual debug device type: DIQ under HWS (cpsch), NODIQ otherwise */
-       if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
-               type = DBGDEV_TYPE_NODIQ;
-
-       kfd_dbgdev_init(new_buff->dbgdev, pdev, type);
-       *ppmgr = new_buff;
-
-       return true;
-}
-
-long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
-{
-       if (pmgr->pasid != 0) {
-               pr_debug("H/W debugger is already active using pasid 0x%x\n",
-                               pmgr->pasid);
-               return -EBUSY;
-       }
-
-       /* remember pasid */
-       pmgr->pasid = p->pasid;
-
-       /* provide the pqm for diq generation */
-       pmgr->dbgdev->pqm = &p->pqm;
-
-       /* perform the actual registration */
-       pmgr->dbgdev->dbgdev_register(pmgr->dbgdev);
-
-       return 0;
-}
-
-long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
-{
-       /* Is the request coming from the already-registered process? */
-       if (pmgr->pasid != p->pasid) {
-               pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
-                               p->pasid);
-               return -EINVAL;
-       }
-
-       pmgr->dbgdev->dbgdev_unregister(pmgr->dbgdev);
-
-       pmgr->pasid = 0;
-
-       return 0;
-}
-
-long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
-                               struct dbg_wave_control_info *wac_info)
-{
-       /* Is the request coming from the already-registered process? */
-       if (pmgr->pasid != wac_info->process->pasid) {
-               pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
-                               wac_info->process->pasid);
-               return -EINVAL;
-       }
-
-       return (long) pmgr->dbgdev->dbgdev_wave_control(pmgr->dbgdev, wac_info);
-}
-
-long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
-                               struct dbg_address_watch_info *adw_info)
-{
-       /* Is the request coming from the already-registered process? */
-       if (pmgr->pasid != adw_info->process->pasid) {
-               pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
-                               adw_info->process->pasid);
-               return -EINVAL;
-       }
-
-       return (long) pmgr->dbgdev->dbgdev_address_watch(pmgr->dbgdev,
-                                                       adw_info);
-}
-
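
Stripped of the queue plumbing, the manager removed above is a single-owner gate: one
PASID registers, and every later wave-control or address-watch request is checked
against it. A minimal sketch of that model (illustrative names, not the kernel API):

#include <errno.h>
#include <stdint.h>

struct dbgmgr_sketch {
        uint32_t pasid;         /* 0 means "no debugger attached" */
};

int dbg_register(struct dbgmgr_sketch *m, uint32_t pasid)
{
        if (m->pasid != 0)
                return -EBUSY;  /* already owned by another process */
        m->pasid = pasid;
        return 0;
}

int dbg_request(const struct dbgmgr_sketch *m, uint32_t pasid)
{
        if (m->pasid != pasid)
                return -EINVAL; /* requester never registered */
        return 0;               /* ok to forward to the dbgdev ops */
}
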
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
deleted file mode 100644
index f9c6df1..0000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef KFD_DBGMGR_H_
-#define KFD_DBGMGR_H_
-
-#include "kfd_priv.h"
-
-/* must align with hsakmttypes definition */
-#pragma pack(push, 4)
-
-enum HSA_DBG_WAVEOP {
-       HSA_DBG_WAVEOP_HALT = 1,   /* Halts a wavefront */
-       HSA_DBG_WAVEOP_RESUME = 2, /* Resumes a wavefront */
-       HSA_DBG_WAVEOP_KILL = 3,   /* Kills a wavefront */
-       HSA_DBG_WAVEOP_DEBUG = 4,  /* Causes wavefront to enter dbg mode */
-       HSA_DBG_WAVEOP_TRAP = 5,   /* Causes wavefront to take a trap */
-       HSA_DBG_NUM_WAVEOP = 5,
-       HSA_DBG_MAX_WAVEOP = 0xFFFFFFFF
-};
-
-enum HSA_DBG_WAVEMODE {
-       /* send command to a single wave */
-       HSA_DBG_WAVEMODE_SINGLE = 0,
-       /*
-        * Broadcast to all wavefronts of all processes is not
-        * supported for HSA user mode
-        */
-
-       /* send to waves within current process */
-       HSA_DBG_WAVEMODE_BROADCAST_PROCESS = 2,
-       /* send to waves within current process on CU  */
-       HSA_DBG_WAVEMODE_BROADCAST_PROCESS_CU = 3,
-       HSA_DBG_NUM_WAVEMODE = 3,
-       HSA_DBG_MAX_WAVEMODE = 0xFFFFFFFF
-};
-
-enum HSA_DBG_WAVEMSG_TYPE {
-       HSA_DBG_WAVEMSG_AUTO = 0,
-       HSA_DBG_WAVEMSG_USER = 1,
-       HSA_DBG_WAVEMSG_ERROR = 2,
-       HSA_DBG_NUM_WAVEMSG,
-       HSA_DBG_MAX_WAVEMSG = 0xFFFFFFFF
-};
-
-enum HSA_DBG_WATCH_MODE {
-       HSA_DBG_WATCH_READ = 0,         /* Read operations only */
-       HSA_DBG_WATCH_NONREAD = 1,      /* Write or Atomic operations only */
-       HSA_DBG_WATCH_ATOMIC = 2,       /* Atomic Operations only */
-       HSA_DBG_WATCH_ALL = 3,          /* Read, Write or Atomic operations */
-       HSA_DBG_WATCH_NUM,
-       HSA_DBG_WATCH_SIZE = 0xFFFFFFFF
-};
-
-/* This structure is hardware specific and may change in the future */
-struct HsaDbgWaveMsgAMDGen2 {
-       union {
-               struct ui32 {
-                       uint32_t UserData:8;    /* user data */
-                       uint32_t ShaderArray:1; /* Shader array */
-                       uint32_t Priv:1;        /* Privileged */
-                       uint32_t Reserved0:4;   /* Reserved, should be 0 */
-                       uint32_t WaveId:4;      /* wave id */
-                       uint32_t SIMD:2;        /* SIMD id */
-                       uint32_t HSACU:4;       /* Compute unit */
-                       uint32_t ShaderEngine:2;/* Shader engine */
-                       uint32_t MessageType:2; /* see HSA_DBG_WAVEMSG_TYPE */
-                       uint32_t Reserved1:4;   /* Reserved, should be 0 */
-               } ui32;
-               uint32_t Value;
-       };
-       uint32_t Reserved2;
-};
-
-union HsaDbgWaveMessageAMD {
-       struct HsaDbgWaveMsgAMDGen2 WaveMsgInfoGen2;
-       /* for future HsaDbgWaveMsgAMDGen3; */
-};
-
-struct HsaDbgWaveMessage {
-       void *MemoryVA;         /* ptr to associated host-accessible data */
-       union HsaDbgWaveMessageAMD DbgWaveMsg;
-};
-
-/*
- * TODO: These definitions are to be moved to kfd_event once it is implemented.
- *
- * HSA sync primitive, Event and HW Exception notification API definitions.
- * The API functions allow the runtime to define a so-called sync-primitive,
- * a SW object combining a user-mode provided "syncvar" and a scheduler event
- * that can be signaled through a defined GPU interrupt. A syncvar is
- * a process virtual memory location of a certain size that can be accessed
- * by CPU and GPU shader code within the process to set and query the content
- * within that memory. The definition of the content is determined by the HSA
- * runtime and potentially GPU shader code interfacing with the HSA runtime.
- * The syncvar values are commonly written through a PM4 WRITE_DATA packet
- * in the user mode instruction stream. The OS scheduler event is typically
- * associated and signaled by an interrupt issued by the GPU, but other HSA
- * system interrupt conditions from other HW (e.g. IOMMUv2) may be surfaced
- * by the KFD by this mechanism, too.
- */
-
-/* these are the new definitions for events */
-enum HSA_EVENTTYPE {
-       HSA_EVENTTYPE_SIGNAL = 0,       /* user-mode generated GPU signal */
-       HSA_EVENTTYPE_NODECHANGE = 1,   /* HSA node change (attach/detach) */
-       HSA_EVENTTYPE_DEVICESTATECHANGE = 2,    /* HSA device state change
-                                                * (start/stop)
-                                                */
-       HSA_EVENTTYPE_HW_EXCEPTION = 3, /* GPU shader exception event */
-       HSA_EVENTTYPE_SYSTEM_EVENT = 4, /* GPU SYSCALL with parameter info */
-       HSA_EVENTTYPE_DEBUG_EVENT = 5,  /* GPU signal for debugging */
-       HSA_EVENTTYPE_PROFILE_EVENT = 6,/* GPU signal for profiling */
-       HSA_EVENTTYPE_QUEUE_EVENT = 7,  /* GPU signal queue idle state
-                                        * (EOP pm4)
-                                        */
-       /* ...  */
-       HSA_EVENTTYPE_MAXID,
-       HSA_EVENTTYPE_TYPE_SIZE = 0xFFFFFFFF
-};
-
-/* Sub-definitions for various event types: Syncvar */
-struct HsaSyncVar {
-       union SyncVar {
-               void *UserData; /* pointer to user mode data */
-               uint64_t UserDataPtrValue; /* 64bit compatibility of value */
-       } SyncVar;
-       uint64_t SyncVarSize;
-};
-
-/* Sub-definitions for various event types: NodeChange */
-
-enum HSA_EVENTTYPE_NODECHANGE_FLAGS {
-       HSA_EVENTTYPE_NODECHANGE_ADD = 0,
-       HSA_EVENTTYPE_NODECHANGE_REMOVE = 1,
-       HSA_EVENTTYPE_NODECHANGE_SIZE = 0xFFFFFFFF
-};
-
-struct HsaNodeChange {
-       /* HSA node added/removed on the platform */
-       enum HSA_EVENTTYPE_NODECHANGE_FLAGS Flags;
-};
-
-/* Sub-definitions for various event types: DeviceStateChange */
-enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS {
-       /* device started (and available) */
-       HSA_EVENTTYPE_DEVICESTATUSCHANGE_START = 0,
-       /* device stopped (i.e. unavailable) */
-       HSA_EVENTTYPE_DEVICESTATUSCHANGE_STOP = 1,
-       HSA_EVENTTYPE_DEVICESTATUSCHANGE_SIZE = 0xFFFFFFFF
-};
-
-enum HSA_DEVICE {
-       HSA_DEVICE_CPU = 0,
-       HSA_DEVICE_GPU = 1,
-       MAX_HSA_DEVICE = 2
-};
-
-struct HsaDeviceStateChange {
-       uint32_t NodeId;        /* F-NUMA node that contains the device */
-       enum HSA_DEVICE Device; /* device type: GPU or CPU */
-       enum HSA_EVENTTYPE_DEVICESTATECHANGE_FLAGS Flags; /* event flags */
-};
-
-struct HsaEventData {
-       enum HSA_EVENTTYPE EventType; /* event type */
-       union EventData {
-               /*
-                * return data associated with HSA_EVENTTYPE_SIGNAL
-                * and other events
-                */
-               struct HsaSyncVar SyncVar;
-
-               /* data associated with HSA_EVENTTYPE_NODE_CHANGE */
-               struct HsaNodeChange NodeChangeState;
-
-               /* data associated with HSA_EVENTTYPE_DEVICE_STATE_CHANGE */
-               struct HsaDeviceStateChange DeviceState;
-       } EventData;
-
-       /* the following data entries are internal to the KFD & thunk itself */
-
-       /* internal thunk store for Event data (OsEventHandle) */
-       uint64_t HWData1;
-       /* internal thunk store for Event data (HWAddress) */
-       uint64_t HWData2;
-       /* internal thunk store for Event data (HWData) */
-       uint32_t HWData3;
-};
-
-struct HsaEventDescriptor {
-       /* event type to allocate */
-       enum HSA_EVENTTYPE EventType;
-       /* H-NUMA node containing GPU device that is event source */
-       uint32_t NodeId;
-       /* pointer to user mode syncvar data, syncvar->UserDataPtrValue
-        * may be NULL
-        */
-       struct HsaSyncVar SyncVar;
-};
-
-struct HsaEvent {
-       uint32_t EventId;
-       struct HsaEventData EventData;
-};
-
-#pragma pack(pop)
-
-enum DBGDEV_TYPE {
-       DBGDEV_TYPE_ILLEGAL = 0,
-       DBGDEV_TYPE_NODIQ = 1,
-       DBGDEV_TYPE_DIQ = 2,
-       DBGDEV_TYPE_TEST = 3
-};
-
-struct dbg_address_watch_info {
-       struct kfd_process *process;
-       enum HSA_DBG_WATCH_MODE *watch_mode;
-       uint64_t *watch_address;
-       uint64_t *watch_mask;
-       struct HsaEvent *watch_event;
-       uint32_t num_watch_points;
-};
-
-struct dbg_wave_control_info {
-       struct kfd_process *process;
-       uint32_t trapId;
-       enum HSA_DBG_WAVEOP operand;
-       enum HSA_DBG_WAVEMODE mode;
-       struct HsaDbgWaveMessage dbgWave_msg;
-};
-
-struct kfd_dbgdev {
-
-       /* The device that owns this data. */
-       struct kfd_dev *dev;
-
-       /* kernel queue for DIQ */
-       struct kernel_queue *kq;
-
-       /* a pointer to the pqm of the calling process */
-       struct process_queue_manager *pqm;
-
-       /* type of debug device (DIQ, non-DIQ, etc.) */
-       enum DBGDEV_TYPE type;
-
-       /* virtualized function pointers to device dbg */
-       int (*dbgdev_register)(struct kfd_dbgdev *dbgdev);
-       int (*dbgdev_unregister)(struct kfd_dbgdev *dbgdev);
-       int (*dbgdev_address_watch)(struct kfd_dbgdev *dbgdev,
-                               struct dbg_address_watch_info *adw_info);
-       int (*dbgdev_wave_control)(struct kfd_dbgdev *dbgdev,
-                               struct dbg_wave_control_info *wac_info);
-
-};
-
-struct kfd_dbgmgr {
-       u32 pasid;
-       struct kfd_dev *dev;
-       struct kfd_dbgdev *dbgdev;
-};
-
-/* prototypes for debug manager functions */
-struct mutex *kfd_get_dbgmgr_mutex(void);
-void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr);
-bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev);
-long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
-long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p);
-long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
-                               struct dbg_wave_control_info *wac_info);
-long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
-                       struct dbg_address_watch_info *adw_info);
-#endif /* KFD_DBGMGR_H_ */
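
The sync-primitive comment in this header describes the one GPU-to-CPU completion
mechanism the debugger actually used: the CP overwrites a CPU-visible sync variable
(via the RELEASE_MEM packet built in kfd_dbgdev.c) and the CPU polls it with a
timeout. A stand-alone sketch of that handshake, assuming a plain monotonic-clock
poll loop (names illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

enum { PENDING = 1, ACTIVE = 2 };       /* mirrors the QUEUESTATE__* values */

static long elapsed_ms(const struct timespec *a, const struct timespec *b)
{
        return (b->tv_sec - a->tv_sec) * 1000 +
               (b->tv_nsec - a->tv_nsec) / 1000000;
}

/* Poll until the "GPU" writes the expected value or the timeout expires. */
bool fence_wait_timeout(volatile uint64_t *sync_var,
                        uint64_t expected, long timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while (*sync_var != expected) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (elapsed_ms(&start, &now) > timeout_ms)
                        return false;   /* CP never signalled */
        }
        return true;
}
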
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2b65d0acae2ce582663cf3abf8e537a493eb04f4..7f174628998980de52c965fa6e8b89f89983d9ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -64,34 +64,33 @@ static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
        uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];
 
        switch (sdma_version) {
-               case IP_VERSION(4, 0, 0):/* VEGA10 */
-               case IP_VERSION(4, 0, 1):/* VEGA12 */
-               case IP_VERSION(4, 1, 0):/* RAVEN */
-               case IP_VERSION(4, 1, 1):/* RAVEN */
-               case IP_VERSION(4, 1, 2):/* RENOIR */
-               case IP_VERSION(5, 2, 1):/* VANGOGH */
-               case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
-                       kfd->device_info.num_sdma_queues_per_engine = 2;
-                       break;
-               case IP_VERSION(4, 2, 0):/* VEGA20 */
-               case IP_VERSION(4, 2, 2):/* ARCTURUS */
-               case IP_VERSION(4, 4, 0):/* ALDEBARAN */
-               case IP_VERSION(5, 0, 0):/* NAVI10 */
-               case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
-               case IP_VERSION(5, 0, 2):/* NAVI14 */
-               case IP_VERSION(5, 0, 5):/* NAVI12 */
-               case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
-               case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
-               case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
-               case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
-                       kfd->device_info.num_sdma_queues_per_engine = 8;
-                       break;
-               default:
-                       dev_warn(kfd_device,
-                               "Default sdma queue per engine(8) is set due to "
-                               "mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
-                                sdma_version);
-                       kfd->device_info.num_sdma_queues_per_engine = 8;
+       case IP_VERSION(4, 0, 0):/* VEGA10 */
+       case IP_VERSION(4, 0, 1):/* VEGA12 */
+       case IP_VERSION(4, 1, 0):/* RAVEN */
+       case IP_VERSION(4, 1, 1):/* RAVEN */
+       case IP_VERSION(4, 1, 2):/* RENOIR */
+       case IP_VERSION(5, 2, 1):/* VANGOGH */
+       case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
+               kfd->device_info.num_sdma_queues_per_engine = 2;
+               break;
+       case IP_VERSION(4, 2, 0):/* VEGA20 */
+       case IP_VERSION(4, 2, 2):/* ARCTURUS */
+       case IP_VERSION(4, 4, 0):/* ALDEBARAN */
+       case IP_VERSION(5, 0, 0):/* NAVI10 */
+       case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
+       case IP_VERSION(5, 0, 2):/* NAVI14 */
+       case IP_VERSION(5, 0, 5):/* NAVI12 */
+       case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
+       case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
+       case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
+       case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
+               kfd->device_info.num_sdma_queues_per_engine = 8;
+               break;
+       default:
+               dev_warn(kfd_device,
+                       "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
+                       sdma_version);
+               kfd->device_info.num_sdma_queues_per_engine = 8;
        }
 }
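
For context on the IP_VERSION() cases being reindented here: amdgpu switches on a
packed major/minor/revision triple. The packing below is an assumption for
illustration (the real macro lives in the amdgpu headers), but it shows why a plain
switch works on the discovered SDMA version:

#include <stdint.h>

/* assumed packing for illustration: major.minor.rev -> one 32-bit value */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int sdma_queues_per_engine(uint32_t sdma_version)
{
        switch (sdma_version) {
        case IP_VERSION(4, 0, 0):       /* VEGA10 */
        case IP_VERSION(4, 1, 0):       /* RAVEN  */
                return 2;
        case IP_VERSION(4, 2, 0):       /* VEGA20 */
        case IP_VERSION(5, 2, 0):       /* SIENNA_CICHLID */
                return 8;
        default:
                return 8;               /* conservative fallback, as above */
        }
}
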
 
@@ -111,6 +110,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
        case IP_VERSION(10, 3, 1): /* VANGOGH */
        case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
        case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
+       case IP_VERSION(10, 1, 4):
        case IP_VERSION(10, 1, 10): /* NAVI10 */
        case IP_VERSION(10, 1, 2): /* NAVI12 */
        case IP_VERSION(10, 1, 1): /* NAVI14 */
@@ -308,6 +308,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
                        break;
                /* Cyan Skillfish */
                case IP_VERSION(10, 1, 3):
+               case IP_VERSION(10, 1, 4):
                        gfx_target_version = 100103;
                        if (!vf)
                                f2g = &gfx_v10_kfd2kgd;
@@ -576,8 +577,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        if (kfd_resume(kfd))
                goto kfd_resume_error;
 
-       kfd->dbgmgr = NULL;
-
        if (kfd_topology_add_device(kfd)) {
                dev_err(kfd_device, "Error adding device to topology\n");
                goto kfd_topology_add_device_error;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4b6814949aad0fc9ffa063f185f8b9f6ab332f2e..7f6f1a842b0b9c4821c86c88e05039546b540143 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -58,7 +58,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
                                struct queue *q);
 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
 static int allocate_sdma_queue(struct device_queue_manager *dqm,
-                               struct queue *q);
+                               struct queue *q, const uint32_t *restore_sdma_id);
 static void kfd_process_hw_exception(struct work_struct *work);
 
 static inline
@@ -144,7 +144,13 @@ static void decrement_queue_count(struct device_queue_manager *dqm,
                dqm->active_cp_queue_count--;
 }
 
-static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
+/*
+ * Allocate a doorbell ID to this queue.
+ * If restore_id is passed in, make sure the requested ID is valid, then claim it.
+ */
+static int allocate_doorbell(struct qcm_process_device *qpd,
+                            struct queue *q,
+                            uint32_t const *restore_id)
 {
        struct kfd_dev *dev = qpd->dqm->dev;
 
@@ -152,6 +158,10 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
                /* On pre-SOC15 chips we need to use the queue ID to
                 * preserve the user mode ABI.
                 */
+
+               if (restore_id && *restore_id != q->properties.queue_id)
+                       return -EINVAL;
+
                q->doorbell_id = q->properties.queue_id;
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                        q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
@@ -160,25 +170,37 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
                 * The doorbell index distance between RLC (2*i) and (2*i+1)
                 * for an SDMA engine is 512.
                 */
-               uint32_t *idx_offset =
-                               dev->shared_resources.sdma_doorbell_idx;
 
-               q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
-                       + (q->properties.sdma_queue_id & 1)
-                       * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
-                       + (q->properties.sdma_queue_id >> 1);
+               uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx;
+               uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
+                                               + (q->properties.sdma_queue_id & 1)
+                                               * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+                                               + (q->properties.sdma_queue_id >> 1);
+
+               if (restore_id && *restore_id != valid_id)
+                       return -EINVAL;
+               q->doorbell_id = valid_id;
        } else {
-               /* For CP queues on SOC15 reserve a free doorbell ID */
-               unsigned int found;
-
-               found = find_first_zero_bit(qpd->doorbell_bitmap,
-                                           KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
-               if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
-                       pr_debug("No doorbells available");
-                       return -EBUSY;
+               /* For CP queues on SOC15 */
+               if (restore_id) {
+                       /* make sure the requested ID is free */
+                       if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
+                               return -EINVAL;
+
+                       q->doorbell_id = *restore_id;
+               } else {
+                       /* or reserve a free doorbell ID */
+                       unsigned int found;
+
+                       found = find_first_zero_bit(qpd->doorbell_bitmap,
+                                               KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
+                       if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
+                               pr_debug("No doorbells available\n");
+                               return -EBUSY;
+                       }
+                       set_bit(found, qpd->doorbell_bitmap);
+                       q->doorbell_id = found;
                }
-               set_bit(found, qpd->doorbell_bitmap);
-               q->doorbell_id = found;
        }
 
        q->properties.doorbell_off =
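To make the index arithmetic above concrete, a minimal standalone sketch (a hypothetical helper, not part of the patch), assuming KFD_QUEUE_DOORBELL_MIRROR_OFFSET is 512 as the comment states:

    /* Hypothetical sketch of the SDMA doorbell index math above:
     * RLC queue 2*i sits at the engine base + i, and queue 2*i+1
     * mirrors it 512 doorbell slots higher.
     */
    static uint32_t sdma_doorbell_index(const uint32_t *idx_offset,
                                        uint32_t engine_id, uint32_t queue_id)
    {
            return idx_offset[engine_id] +
                   (queue_id & 1) * 512 +  /* KFD_QUEUE_DOORBELL_MIRROR_OFFSET */
                   (queue_id >> 1);
    }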
@@ -299,7 +321,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
 
 static int create_queue_nocpsch(struct device_queue_manager *dqm,
                                struct queue *q,
-                               struct qcm_process_device *qpd)
+                               struct qcm_process_device *qpd,
+                               const struct kfd_criu_queue_priv_data *qd,
+                               const void *restore_mqd, const void *restore_ctl_stack)
 {
        struct mqd_manager *mqd_mgr;
        int retval;
@@ -339,13 +363,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
                        q->pipe, q->queue);
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-               retval = allocate_sdma_queue(dqm, q);
+               retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
                if (retval)
                        goto deallocate_vmid;
                dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
        }
 
-       retval = allocate_doorbell(qpd, q);
+       retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
        if (retval)
                goto out_deallocate_hqd;
 
@@ -358,8 +382,15 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
                retval = -ENOMEM;
                goto out_deallocate_doorbell;
        }
-       mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
-                               &q->gart_mqd_addr, &q->properties);
+
+       if (qd)
+               mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
+                                    &q->properties, restore_mqd, restore_ctl_stack,
+                                    qd->ctl_stack_size);
+       else
+               mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
+                                       &q->gart_mqd_addr, &q->properties);
+
        if (q->properties.is_active) {
                if (!dqm->sched_running) {
                        WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
@@ -449,6 +480,65 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm,
        dqm->allocated_queues[q->pipe] |= (1 << q->queue);
 }
 
+#define SQ_IND_CMD_CMD_KILL            0x00000003
+#define SQ_IND_CMD_MODE_BROADCAST      0x00000001
+
+static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
+{
+       int status = 0;
+       unsigned int vmid;
+       uint16_t queried_pasid;
+       union SQ_CMD_BITS reg_sq_cmd;
+       union GRBM_GFX_INDEX_BITS reg_gfx_index;
+       struct kfd_process_device *pdd;
+       int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
+       int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
+
+       reg_sq_cmd.u32All = 0;
+       reg_gfx_index.u32All = 0;
+
+       pr_debug("Killing all process wavefronts\n");
+
+       /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
+        * ATC_VMID15_PASID_MAPPING
+        * to check which VMID the current process is mapped to.
+        */
+
+       for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
+               status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info(
+                               dev->adev, vmid, &queried_pasid);
+
+               if (status && queried_pasid == p->pasid) {
+                       pr_debug("Killing wavefronts of vmid %d and pasid 0x%x\n",
+                                       vmid, p->pasid);
+                       break;
+               }
+       }
+
+       if (vmid > last_vmid_to_scan) {
+               pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
+               return -EFAULT;
+       }
+
+       /* Look up the VMID for this process the safe way, via its PDD */
+       pdd = kfd_get_process_device_data(dev, p);
+       if (!pdd)
+               return -EFAULT;
+
+       reg_gfx_index.bits.sh_broadcast_writes = 1;
+       reg_gfx_index.bits.se_broadcast_writes = 1;
+       reg_gfx_index.bits.instance_broadcast_writes = 1;
+       reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
+       reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
+       reg_sq_cmd.bits.vm_id = vmid;
+
+       dev->kfd2kgd->wave_control_execute(dev->adev,
+                                       reg_gfx_index.u32All,
+                                       reg_sq_cmd.u32All);
+
+       return 0;
+}
+
 /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
  */
@@ -1034,7 +1124,7 @@ static void pre_reset(struct device_queue_manager *dqm)
 }
 
 static int allocate_sdma_queue(struct device_queue_manager *dqm,
-                               struct queue *q)
+                               struct queue *q, const uint32_t *restore_sdma_id)
 {
        int bit;
 
@@ -1044,9 +1134,21 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
                        return -ENOMEM;
                }
 
-               bit = __ffs64(dqm->sdma_bitmap);
-               dqm->sdma_bitmap &= ~(1ULL << bit);
-               q->sdma_id = bit;
+               if (restore_sdma_id) {
+                       /* Re-use existing sdma_id */
+                       if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
+                               pr_err("SDMA queue already in use\n");
+                               return -EBUSY;
+                       }
+                       dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+                       q->sdma_id = *restore_sdma_id;
+               } else {
+                       /* Find first available sdma_id */
+                       bit = __ffs64(dqm->sdma_bitmap);
+                       dqm->sdma_bitmap &= ~(1ULL << bit);
+                       q->sdma_id = bit;
+               }
+
                q->properties.sdma_engine_id = q->sdma_id %
                                kfd_get_num_sdma_engines(dqm->dev);
                q->properties.sdma_queue_id = q->sdma_id /
@@ -1056,9 +1158,19 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
                        pr_err("No more XGMI SDMA queue to allocate\n");
                        return -ENOMEM;
                }
-               bit = __ffs64(dqm->xgmi_sdma_bitmap);
-               dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
-               q->sdma_id = bit;
+               if (restore_sdma_id) {
+                       /* Re-use existing sdma_id */
+                       if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
+                               pr_err("SDMA queue already in use\n");
+                               return -EBUSY;
+                       }
+                       dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
+                       q->sdma_id = *restore_sdma_id;
+               } else {
+                       bit = __ffs64(dqm->xgmi_sdma_bitmap);
+                       dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
+                       q->sdma_id = bit;
+               }
                /* sdma_engine_id is sdma id including
                 * both PCIe-optimized SDMAs and XGMI-
                 * optimized SDMAs. The calculation below
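Both the PCIe and XGMI branches above follow the same reserve-or-restore bitmap pattern; a minimal sketch of the shared idea (hypothetical helper, assuming set bits mark free queue slots, as in dqm->sdma_bitmap):

    /* Sketch: take a specific id on CRIU restore, or the lowest free
     * one otherwise. Set bits mark free slots; a taken slot is cleared.
     * Callers check that the bitmap is non-empty first, as above.
     */
    static int bitmap_take_id(uint64_t *bitmap, const uint32_t *restore_id)
    {
            unsigned int bit;

            if (restore_id) {
                    if (!(*bitmap & (1ULL << *restore_id)))
                            return -EBUSY;  /* saved id already in use */
                    bit = *restore_id;
            } else {
                    bit = __ffs64(*bitmap); /* lowest free slot */
            }
            *bitmap &= ~(1ULL << bit);
            return bit;
    }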
@@ -1288,7 +1400,9 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 }
 
 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
-                       struct qcm_process_device *qpd)
+                       struct qcm_process_device *qpd,
+                       const struct kfd_criu_queue_priv_data *qd,
+                       const void *restore_mqd, const void *restore_ctl_stack)
 {
        int retval;
        struct mqd_manager *mqd_mgr;
@@ -1303,13 +1417,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
                q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
                dqm_lock(dqm);
-               retval = allocate_sdma_queue(dqm, q);
+               retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
                dqm_unlock(dqm);
                if (retval)
                        goto out;
        }
 
-       retval = allocate_doorbell(qpd, q);
+       retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
        if (retval)
                goto out_deallocate_sdma_queue;
 
@@ -1334,8 +1448,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
         * updates the is_evicted flag but is a no-op otherwise.
         */
        q->properties.is_evicted = !!qpd->evicted;
-       mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
-                               &q->gart_mqd_addr, &q->properties);
+
+       if (qd)
+               mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
+                                    &q->properties, restore_mqd, restore_ctl_stack,
+                                    qd->ctl_stack_size);
+       else
+               mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
+                                       &q->gart_mqd_addr, &q->properties);
 
        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;
@@ -1738,6 +1858,56 @@ static int get_wave_state(struct device_queue_manager *dqm,
                        ctl_stack_used_size, save_area_used_size);
 }
 
+static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
+                       const struct queue *q,
+                       u32 *mqd_size,
+                       u32 *ctl_stack_size)
+{
+       struct mqd_manager *mqd_mgr;
+       enum KFD_MQD_TYPE mqd_type =
+                       get_mqd_type_from_queue_type(q->properties.type);
+
+       dqm_lock(dqm);
+       mqd_mgr = dqm->mqd_mgrs[mqd_type];
+       *mqd_size = mqd_mgr->mqd_size;
+       *ctl_stack_size = 0;
+
+       if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
+               mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
+
+       dqm_unlock(dqm);
+}
+
+static int checkpoint_mqd(struct device_queue_manager *dqm,
+                         const struct queue *q,
+                         void *mqd,
+                         void *ctl_stack)
+{
+       struct mqd_manager *mqd_mgr;
+       int r = 0;
+       enum KFD_MQD_TYPE mqd_type =
+                       get_mqd_type_from_queue_type(q->properties.type);
+
+       dqm_lock(dqm);
+
+       if (q->properties.is_active || !q->device->cwsr_enabled) {
+               r = -EINVAL;
+               goto dqm_unlock;
+       }
+
+       mqd_mgr = dqm->mqd_mgrs[mqd_type];
+       if (!mqd_mgr->checkpoint_mqd) {
+               r = -EOPNOTSUPP;
+               goto dqm_unlock;
+       }
+
+       mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
+
+dqm_unlock:
+       dqm_unlock(dqm);
+       return r;
+}
+
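get_queue_checkpoint_info() and checkpoint_mqd() form a two-phase API: query the snapshot sizes first, allocate buffers, then copy. A hedged sketch of a caller (hypothetical helper; the real callers live in the CRIU ioctl paths):

    /* Sketch: checkpoint one queue's MQD via the two-phase dqm API. */
    static int snapshot_queue(struct device_queue_manager *dqm,
                              const struct queue *q)
    {
            u32 mqd_size, ctl_stack_size;
            void *mqd, *ctl_stack = NULL;
            int r;

            dqm->ops.get_queue_checkpoint_info(dqm, q, &mqd_size,
                                               &ctl_stack_size);

            mqd = kvzalloc(mqd_size, GFP_KERNEL);
            if (!mqd)
                    return -ENOMEM;
            if (ctl_stack_size) {
                    ctl_stack = kvzalloc(ctl_stack_size, GFP_KERNEL);
                    if (!ctl_stack) {
                            kvfree(mqd);
                            return -ENOMEM;
                    }
            }

            r = dqm->ops.checkpoint_mqd(dqm, q, mqd, ctl_stack);

            /* ... copy the snapshots into the CRIU private data blob ... */

            kvfree(ctl_stack);
            kvfree(mqd);
            return r;
    }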
 static int process_termination_cpsch(struct device_queue_manager *dqm,
                struct qcm_process_device *qpd)
 {
@@ -1915,6 +2085,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
                dqm->ops.restore_process_queues = restore_process_queues_cpsch;
                dqm->ops.get_wave_state = get_wave_state;
                dqm->ops.reset_queues = reset_queues_cpsch;
+               dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
+               dqm->ops.checkpoint_mqd = checkpoint_mqd;
                break;
        case KFD_SCHED_POLICY_NO_HWS:
                /* initialize dqm for no cp scheduling */
@@ -1934,6 +2106,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
                dqm->ops.restore_process_queues =
                        restore_process_queues_nocpsch;
                dqm->ops.get_wave_state = get_wave_state;
+               dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
+               dqm->ops.checkpoint_mqd = checkpoint_mqd;
                break;
        default:
                pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
@@ -2005,7 +2179,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
        kfree(dqm);
 }
 
-int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid)
+int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
 {
        struct kfd_process_device *pdd;
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
index e145e4deb53a778cf99f65bd43dabeb4fea411b9..05a9c17daa3e6ea5c2446388a65beab151027b89 100644 (file)
@@ -39,6 +39,41 @@ struct device_process_node {
        struct list_head list;
 };
 
+union SQ_CMD_BITS {
+       struct {
+               uint32_t cmd:3;
+               uint32_t:1;
+               uint32_t mode:3;
+               uint32_t check_vmid:1;
+               uint32_t trap_id:3;
+               uint32_t:5;
+               uint32_t wave_id:4;
+               uint32_t simd_id:2;
+               uint32_t:2;
+               uint32_t queue_id:3;
+               uint32_t:1;
+               uint32_t vm_id:4;
+       } bitfields, bits;
+       uint32_t u32All;
+       signed int i32All;
+       float f32All;
+};
+
+union GRBM_GFX_INDEX_BITS {
+       struct {
+               uint32_t instance_index:8;
+               uint32_t sh_index:8;
+               uint32_t se_index:8;
+               uint32_t:5;
+               uint32_t sh_broadcast_writes:1;
+               uint32_t instance_broadcast_writes:1;
+               uint32_t se_broadcast_writes:1;
+       } bitfields, bits;
+       uint32_t u32All;
+       signed int i32All;
+       float f32All;
+};
+
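These unions let dbgdev_wave_reset_wavefronts() compose the register words field by field and hand the packed u32All to wave_control_execute(). A minimal sketch of the packing (hypothetical helper, reusing the SQ_IND_CMD_* values defined in device_queue_manager.c):

    /* Sketch: pack a broadcast KILL into the SQ_CMD register word,
     * as dbgdev_wave_reset_wavefronts() does. 'vmid' is assumed to
     * be the VMID found by the PASID scan.
     */
    static uint32_t pack_sq_kill_cmd(unsigned int vmid)
    {
            union SQ_CMD_BITS reg_sq_cmd = { .u32All = 0 };

            reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
            reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
            reg_sq_cmd.bits.vm_id = vmid;

            return reg_sq_cmd.u32All;       /* packed register value */
    }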
 /**
  * struct device_queue_manager_ops
  *
@@ -83,12 +118,18 @@ struct device_process_node {
  * control stack, if kept in the MQD, to the given userspace address.
  *
  * @reset_queues: reset queues which consume RAS poison
+ * @get_queue_checkpoint_info: Retrieves queue size information for CRIU checkpoint.
+ *
+ * @checkpoint_mqd: checkpoint queue MQD contents for CRIU.
  */
 
 struct device_queue_manager_ops {
        int     (*create_queue)(struct device_queue_manager *dqm,
                                struct queue *q,
-                               struct qcm_process_device *qpd);
+                               struct qcm_process_device *qpd,
+                               const struct kfd_criu_queue_priv_data *qd,
+                               const void *restore_mqd,
+                               const void *restore_ctl_stack);
 
        int     (*destroy_queue)(struct device_queue_manager *dqm,
                                struct qcm_process_device *qpd,
@@ -139,6 +180,14 @@ struct device_queue_manager_ops {
 
        int (*reset_queues)(struct device_queue_manager *dqm,
                                        uint16_t pasid);
+       void    (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
+                                 const struct queue *q, u32 *mqd_size,
+                                 u32 *ctl_stack_size);
+
+       int     (*checkpoint_mqd)(struct device_queue_manager *dqm,
+                                 const struct queue *q,
+                                 void *mqd,
+                                 void *ctl_stack);
 };
 
 struct device_queue_manager_asic_ops {
index afe72dd11325d79d468b50ddcc8a8692351b4817..b5eda1e04f34b6f2e33801fadeef16ff930bfc8d 100644 (file)
@@ -55,7 +55,6 @@ struct kfd_signal_page {
        bool need_to_free_pages;
 };
 
-
 static uint64_t *page_slots(struct kfd_signal_page *page)
 {
        return page->kernel_address;
@@ -92,7 +91,8 @@ fail_alloc_signal_store:
 }
 
 static int allocate_event_notification_slot(struct kfd_process *p,
-                                           struct kfd_event *ev)
+                                           struct kfd_event *ev,
+                                           const int *restore_id)
 {
        int id;
 
@@ -104,14 +104,19 @@ static int allocate_event_notification_slot(struct kfd_process *p,
                p->signal_mapped_size = 256*8;
        }
 
-       /*
-        * Compatibility with old user mode: Only use signal slots
-        * user mode has mapped, may be less than
-        * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
-        * of the event limit without breaking user mode.
-        */
-       id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
-                      GFP_KERNEL);
+       if (restore_id) {
+               id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
+                               GFP_KERNEL);
+       } else {
+               /*
+                * Compatibility with old user mode: Only use signal slots
+                * user mode has mapped, may be less than
+                * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
+                * of the event limit without breaking user mode.
+                */
+               id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
+                               GFP_KERNEL);
+       }
        if (id < 0)
                return id;
 
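Pinning a checkpointed ID works because idr_alloc() allocates the first free ID in [start, end); a one-ID window of [*restore_id, *restore_id + 1) therefore either returns exactly that ID or fails with -ENOSPC if it is already taken. A minimal sketch:

    /* Sketch: restore an object under its checkpointed IDR id.
     * Returns the id on success or a negative errno (-ENOSPC when
     * the one-id window is occupied).
     */
    static int idr_restore_id(struct idr *idr, void *ptr, int id)
    {
            return idr_alloc(idr, ptr, id, id + 1, GFP_KERNEL);
    }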
@@ -178,9 +183,8 @@ static struct kfd_event *lookup_signaled_event_by_partial_id(
        return ev;
 }
 
-static int create_signal_event(struct file *devkfd,
-                               struct kfd_process *p,
-                               struct kfd_event *ev)
+static int create_signal_event(struct file *devkfd, struct kfd_process *p,
+                               struct kfd_event *ev, const int *restore_id)
 {
        int ret;
 
@@ -193,7 +197,7 @@ static int create_signal_event(struct file *devkfd,
                return -ENOSPC;
        }
 
-       ret = allocate_event_notification_slot(p, ev);
+       ret = allocate_event_notification_slot(p, ev, restore_id);
        if (ret) {
                pr_warn("Signal event wasn't created because the kernel is out of memory\n");
                return ret;
@@ -209,16 +213,22 @@ static int create_signal_event(struct file *devkfd,
        return 0;
 }
 
-static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
+static int create_other_event(struct kfd_process *p, struct kfd_event *ev, const int *restore_id)
 {
-       /* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
-        * intentional integer overflow to -1 without a compiler
-        * warning. idr_alloc treats a negative value as "maximum
-        * signed integer".
-        */
-       int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
-                          (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
-                          GFP_KERNEL);
+       int id;
+
+       if (restore_id)
+               id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
+                       GFP_KERNEL);
+       else
+               /* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
+                * intentional integer overflow to -1 without a compiler
+                * warning. idr_alloc treats a negative value as "maximum
+                * signed integer".
+                */
+               id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
+                               (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
+                               GFP_KERNEL);
 
        if (id < 0)
                return id;
@@ -295,8 +305,8 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
        return ev->type == KFD_EVENT_TYPE_SIGNAL;
 }
 
-int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
-                      uint64_t size)
+static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+                      uint64_t size, uint64_t user_handle)
 {
        struct kfd_signal_page *page;
 
@@ -315,10 +325,57 @@ int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
 
        p->signal_page = page;
        p->signal_mapped_size = size;
-
+       p->signal_handle = user_handle;
        return 0;
 }
 
+int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
+{
+       struct kfd_dev *kfd;
+       struct kfd_process_device *pdd;
+       void *mem, *kern_addr;
+       uint64_t size;
+       int err = 0;
+
+       if (p->signal_page) {
+               pr_err("Event page is already set\n");
+               return -EINVAL;
+       }
+
+       pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset));
+       if (!pdd) {
+               pr_err("Getting device by id failed in %s\n", __func__);
+               return -EINVAL;
+       }
+       kfd = pdd->dev;
+
+       pdd = kfd_bind_process_to_device(kfd, p);
+       if (IS_ERR(pdd))
+               return PTR_ERR(pdd);
+
+       mem = kfd_process_device_translate_handle(pdd,
+                       GET_IDR_HANDLE(event_page_offset));
+       if (!mem) {
+               pr_err("Can't find BO, offset is 0x%llx\n", event_page_offset);
+               return -EINVAL;
+       }
+
+       err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev,
+                                       mem, &kern_addr, &size);
+       if (err) {
+               pr_err("Failed to map event page to kernel\n");
+               return err;
+       }
+
+       err = kfd_event_page_set(p, kern_addr, size, event_page_offset);
+       if (err) {
+               pr_err("Failed to set event page\n");
+               amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem);
+               return err;
+       }
+       return err;
+}
+
 int kfd_event_create(struct file *devkfd, struct kfd_process *p,
                     uint32_t event_type, bool auto_reset, uint32_t node_id,
                     uint32_t *event_id, uint32_t *event_trigger_data,
@@ -343,14 +400,14 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
        switch (event_type) {
        case KFD_EVENT_TYPE_SIGNAL:
        case KFD_EVENT_TYPE_DEBUG:
-               ret = create_signal_event(devkfd, p, ev);
+               ret = create_signal_event(devkfd, p, ev, NULL);
                if (!ret) {
                        *event_page_offset = KFD_MMAP_TYPE_EVENTS;
                        *event_slot_index = ev->event_id;
                }
                break;
        default:
-               ret = create_other_event(p, ev);
+               ret = create_other_event(p, ev, NULL);
                break;
        }
 
@@ -366,6 +423,166 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
        return ret;
 }
 
+int kfd_criu_restore_event(struct file *devkfd,
+                          struct kfd_process *p,
+                          uint8_t __user *user_priv_ptr,
+                          uint64_t *priv_data_offset,
+                          uint64_t max_priv_data_size)
+{
+       struct kfd_criu_event_priv_data *ev_priv;
+       struct kfd_event *ev = NULL;
+       int ret = 0;
+
+       ev_priv = kmalloc(sizeof(*ev_priv), GFP_KERNEL);
+       if (!ev_priv)
+               return -ENOMEM;
+
+       ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+       if (!ev) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       if (*priv_data_offset + sizeof(*ev_priv) > max_priv_data_size) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       ret = copy_from_user(ev_priv, user_priv_ptr + *priv_data_offset, sizeof(*ev_priv));
+       if (ret) {
+               ret = -EFAULT;
+               goto exit;
+       }
+       *priv_data_offset += sizeof(*ev_priv);
+
+       if (ev_priv->user_handle) {
+               ret = kfd_kmap_event_page(p, ev_priv->user_handle);
+               if (ret)
+                       goto exit;
+       }
+
+       ev->type = ev_priv->type;
+       ev->auto_reset = ev_priv->auto_reset;
+       ev->signaled = ev_priv->signaled;
+
+       init_waitqueue_head(&ev->wq);
+
+       mutex_lock(&p->event_mutex);
+       switch (ev->type) {
+       case KFD_EVENT_TYPE_SIGNAL:
+       case KFD_EVENT_TYPE_DEBUG:
+               ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id);
+               break;
+       case KFD_EVENT_TYPE_MEMORY:
+               memcpy(&ev->memory_exception_data,
+                       &ev_priv->memory_exception_data,
+                       sizeof(struct kfd_hsa_memory_exception_data));
+
+               ret = create_other_event(p, ev, &ev_priv->event_id);
+               break;
+       case KFD_EVENT_TYPE_HW_EXCEPTION:
+               memcpy(&ev->hw_exception_data,
+                       &ev_priv->hw_exception_data,
+                       sizeof(struct kfd_hsa_hw_exception_data));
+
+               ret = create_other_event(p, ev, &ev_priv->event_id);
+               break;
+       }
+       mutex_unlock(&p->event_mutex);
+
+exit:
+       if (ret)
+               kfree(ev);
+
+       kfree(ev_priv);
+
+       return ret;
+}
+
+int kfd_criu_checkpoint_events(struct kfd_process *p,
+                        uint8_t __user *user_priv_data,
+                        uint64_t *priv_data_offset)
+{
+       struct kfd_criu_event_priv_data *ev_privs;
+       int i = 0;
+       int ret = 0;
+       struct kfd_event *ev;
+       uint32_t ev_id;
+
+       uint32_t num_events = kfd_get_num_events(p);
+
+       if (!num_events)
+               return 0;
+
+       ev_privs = kvzalloc(num_events * sizeof(*ev_privs), GFP_KERNEL);
+       if (!ev_privs)
+               return -ENOMEM;
+
+       idr_for_each_entry(&p->event_idr, ev, ev_id) {
+               struct kfd_criu_event_priv_data *ev_priv;
+
+               /*
+                * Currently, all events have the same size of private_data, but the
+                * current ioctls and the CRIU plugin support private_data of variable sizes.
+                */
+               ev_priv = &ev_privs[i];
+
+               ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT;
+
+               /* We store the user_handle with the first event */
+               if (i == 0 && p->signal_page)
+                       ev_priv->user_handle = p->signal_handle;
+
+               ev_priv->event_id = ev->event_id;
+               ev_priv->auto_reset = ev->auto_reset;
+               ev_priv->type = ev->type;
+               ev_priv->signaled = ev->signaled;
+
+               if (ev_priv->type == KFD_EVENT_TYPE_MEMORY)
+                       memcpy(&ev_priv->memory_exception_data,
+                               &ev->memory_exception_data,
+                               sizeof(struct kfd_hsa_memory_exception_data));
+               else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION)
+                       memcpy(&ev_priv->hw_exception_data,
+                               &ev->hw_exception_data,
+                               sizeof(struct kfd_hsa_hw_exception_data));
+
+               pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n",
+                         i,
+                         ev_priv->event_id,
+                         ev_priv->auto_reset,
+                         ev_priv->type,
+                         ev_priv->signaled);
+               i++;
+       }
+
+       ret = copy_to_user(user_priv_data + *priv_data_offset,
+                          ev_privs, num_events * sizeof(*ev_privs));
+       if (ret) {
+               pr_err("Failed to copy events priv to user\n");
+               ret = -EFAULT;
+       }
+
+       *priv_data_offset += num_events * sizeof(*ev_privs);
+
+       kvfree(ev_privs);
+       return ret;
+}
+
+int kfd_get_num_events(struct kfd_process *p)
+{
+       struct kfd_event *ev;
+       uint32_t id;
+       u32 num_events = 0;
+
+       idr_for_each_entry(&p->event_idr, ev, id)
+               num_events++;
+
+       return num_events;
+}
+
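On the checkpoint side, a caller would size the private-data blob from kfd_get_num_events() before serializing, since each event currently contributes one fixed-size record. A hedged sketch (hypothetical helper, for illustration):

    /* Sketch: size the blob, then serialize every event into it. */
    static int criu_dump_events(struct kfd_process *p,
                                uint8_t __user *user_priv_data,
                                uint64_t *priv_offset)
    {
            uint32_t n = kfd_get_num_events(p);

            pr_debug("Dumping %u events (%zu bytes of priv data)\n",
                     n, n * sizeof(struct kfd_criu_event_priv_data));

            return kfd_criu_checkpoint_events(p, user_priv_data,
                                              priv_offset);
    }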
 /* Assumes that p is current. */
 int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
 {
@@ -878,6 +1095,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
 {
        struct kfd_hsa_memory_exception_data memory_exception_data;
        struct vm_area_struct *vma;
+       int user_gpu_id;
 
        /*
         * Because we are called from arbitrary context (workqueue) as opposed
@@ -899,12 +1117,17 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
                return; /* Process is exiting */
        }
 
+       user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+       if (unlikely(user_gpu_id == -EINVAL)) {
+               WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+               return;
+       }
        memset(&memory_exception_data, 0, sizeof(memory_exception_data));
 
        mmap_read_lock(mm);
        vma = find_vma(mm, address);
 
-       memory_exception_data.gpu_id = dev->id;
+       memory_exception_data.gpu_id = user_gpu_id;
        memory_exception_data.va = address;
        /* Set failure reason */
        memory_exception_data.failure.NotPresent = 1;
@@ -980,11 +1203,19 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
        uint32_t id;
        struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
        struct kfd_hsa_memory_exception_data memory_exception_data;
+       int user_gpu_id;
 
        if (!p)
                return; /* Presumably process exited. */
+
+       user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+       if (unlikely(user_gpu_id == -EINVAL)) {
+               WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+               return;
+       }
+
        memset(&memory_exception_data, 0, sizeof(memory_exception_data));
-       memory_exception_data.gpu_id = dev->id;
+       memory_exception_data.gpu_id = user_gpu_id;
        memory_exception_data.failure.imprecise = true;
        /* Set failure reason */
        if (info) {
@@ -1024,27 +1255,34 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
 
        /* Whole gpu reset caused by GPU hang and memory is lost */
        memset(&hw_exception_data, 0, sizeof(hw_exception_data));
-       hw_exception_data.gpu_id = dev->id;
        hw_exception_data.memory_lost = 1;
        hw_exception_data.reset_cause = reset_cause;
 
        memset(&memory_exception_data, 0, sizeof(memory_exception_data));
        memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
-       memory_exception_data.gpu_id = dev->id;
        memory_exception_data.failure.imprecise = true;
 
        idx = srcu_read_lock(&kfd_processes_srcu);
        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+               int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+
+               if (unlikely(user_gpu_id == -EINVAL)) {
+                       WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+                       continue;
+               }
+
                mutex_lock(&p->event_mutex);
                id = KFD_FIRST_NONSIGNAL_EVENT_ID;
                idr_for_each_entry_continue(&p->event_idr, ev, id) {
                        if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
                                ev->hw_exception_data = hw_exception_data;
+                               ev->hw_exception_data.gpu_id = user_gpu_id;
                                set_event(ev);
                        }
                        if (ev->type == KFD_EVENT_TYPE_MEMORY &&
                            reset_cause == KFD_HW_EXCEPTION_ECC) {
                                ev->memory_exception_data = memory_exception_data;
+                               ev->memory_exception_data.gpu_id = user_gpu_id;
                                set_event(ev);
                        }
                }
@@ -1060,18 +1298,25 @@ void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
        struct kfd_hsa_hw_exception_data hw_exception_data;
        struct kfd_event *ev;
        uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+       int user_gpu_id;
 
        if (!p)
                return; /* Presumably process exited. */
 
+       user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
+       if (unlikely(user_gpu_id == -EINVAL)) {
+               WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
+               return;
+       }
+
        memset(&hw_exception_data, 0, sizeof(hw_exception_data));
-       hw_exception_data.gpu_id = dev->id;
+       hw_exception_data.gpu_id = user_gpu_id;
        hw_exception_data.memory_lost = 1;
        hw_exception_data.reset_cause = KFD_HW_EXCEPTION_ECC;
 
        memset(&memory_exception_data, 0, sizeof(memory_exception_data));
        memory_exception_data.ErrorType = KFD_MEM_ERR_POISON_CONSUMED;
-       memory_exception_data.gpu_id = dev->id;
+       memory_exception_data.gpu_id = user_gpu_id;
        memory_exception_data.failure.imprecise = true;
 
        mutex_lock(&p->event_mutex);
index e8bc28009c22b28e0127c791a109f52920f8c5e3..68ee923a440ba86659eee64e0ce7d0cbd62ad933 100644 (file)
@@ -109,8 +109,7 @@ static void event_interrupt_poison_consumption(struct kfd_dev *dev,
 
        switch (source_id) {
        case SOC15_INTSRC_SQ_INTERRUPT_MSG:
-               if (dev->dqm->ops.reset_queues)
-                       ret = dev->dqm->ops.reset_queues(dev->dqm, pasid);
+               kfd_dqm_evict_pasid(dev->dqm, pasid);
                break;
        case SOC15_INTSRC_SDMA_ECC:
        default:
@@ -308,7 +307,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
                info.prot_write = ring_id & 0x20;
 
                kfd_smi_event_update_vmfault(dev, pasid);
-               kfd_process_vm_fault(dev->dqm, pasid);
+               kfd_dqm_evict_pasid(dev->dqm, pasid);
                kfd_signal_vm_fault_event(dev, pasid, &info);
        }
 }
index 66ad8d0b8f7fbda48302fa0e1105dacba7e5ff39..fe62407dacb2763bc9cc7cdd81547ce0448c3c3e 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/pci.h>
 #include <linux/amd-iommu.h>
 #include "kfd_priv.h"
-#include "kfd_dbgmgr.h"
 #include "kfd_topology.h"
 #include "kfd_iommu.h"
 
@@ -163,17 +162,6 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, u32 pasid)
 
        pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
 
-       mutex_lock(kfd_get_dbgmgr_mutex());
-
-       if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
-               if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
-                       kfd_dbgmgr_destroy(dev->dbgmgr);
-                       dev->dbgmgr = NULL;
-               }
-       }
-
-       mutex_unlock(kfd_get_dbgmgr_mutex());
-
        mutex_lock(&p->mutex);
 
        pdd = kfd_get_process_device_data(dev, p);
index ed5385137f4831ee71ed191ad3dc97e50b8dde71..7dcafd337bb234ad0f71f4f3c8ef9d36a2103184 100644 (file)
@@ -36,7 +36,7 @@
 #ifdef dev_fmt
 #undef dev_fmt
 #endif
-#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__
+#define dev_fmt(fmt) "kfd_migrate: " fmt
 
 static uint64_t
 svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
@@ -86,10 +86,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 
        cpu_addr = &job->ibs[0].ptr[num_dw];
 
-       r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-       if (r)
-               goto error_free;
-
+       amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
@@ -315,7 +312,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 
        r = svm_range_vram_node_new(adev, prange, true);
        if (r) {
-               dev_err(adev->dev, "fail %d to alloc vram\n", r);
+               dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
                goto out;
        }
 
@@ -334,7 +331,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                                              DMA_TO_DEVICE);
                        r = dma_mapping_error(dev, src[i]);
                        if (r) {
-                               dev_err(adev->dev, "fail %d dma_map_page\n", r);
+                               dev_err(adev->dev, "%s: fail %d dma_map_page\n",
+                                       __func__, r);
                                goto out_free_vram_pages;
                        }
                } else {
@@ -435,8 +433,8 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 
        r = migrate_vma_setup(&migrate);
        if (r) {
-               dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
-                       prange->start, prange->last);
+               dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
+                       __func__, r, prange->start, prange->last);
                goto out_free;
        }
 
@@ -614,7 +612,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
                r = dma_mapping_error(dev, dst[i]);
                if (r) {
-                       dev_err(adev->dev, "fail %d dma_map_page\n", r);
+                       dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
                        goto out_oom;
                }
 
@@ -674,8 +672,8 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 
        r = migrate_vma_setup(&migrate);
        if (r) {
-               dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
-                       prange->start, prange->last);
+               dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
+                       __func__, r, prange->start, prange->last);
                goto out_free;
        }
 
index e2825ad4d6997f180861976c443cec10bb8ba9df..9b7544ddced77e1aff4152ec86d224b291e5f81e 100644 (file)
@@ -173,3 +173,66 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
                }
        }
 }
+
+int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+                    uint32_t pipe_id, uint32_t queue_id,
+                    struct queue_properties *p, struct mm_struct *mms)
+{
+       return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
+                                             queue_id, p->doorbell_off);
+}
+
+int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
+               enum kfd_preempt_type type, unsigned int timeout,
+               uint32_t pipe_id, uint32_t queue_id)
+{
+       return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
+                                               pipe_id, queue_id);
+}
+
+void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
+             struct kfd_mem_obj *mqd_mem_obj)
+{
+       if (mqd_mem_obj->gtt_mem) {
+               amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
+               kfree(mqd_mem_obj);
+       } else {
+               kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+       }
+}
+
+bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
+                uint64_t queue_address, uint32_t pipe_id,
+                uint32_t queue_id)
+{
+       return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
+                                               pipe_id, queue_id);
+}
+
+int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+                 uint32_t pipe_id, uint32_t queue_id,
+                 struct queue_properties *p, struct mm_struct *mms)
+{
+       return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
+                                               (uint32_t __user *)p->write_ptr,
+                                               mms);
+}
+
+/*
+ * The preempt type is ignored here because there is only one way
+ * to preempt an SDMA queue.
+ */
+int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+                    enum kfd_preempt_type type,
+                    unsigned int timeout, uint32_t pipe_id,
+                    uint32_t queue_id)
+{
+       return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
+}
+
+bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+                     uint64_t queue_address, uint32_t pipe_id,
+                     uint32_t queue_id)
+{
+       return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
+}
index 965e17c5dbb49abfbcdc3e6217531d7a7077f809..d81ae0699300b9149f005ce3c2871a0d34475133 100644 (file)
@@ -100,6 +100,20 @@ struct mqd_manager {
                                  u32 *ctl_stack_used_size,
                                  u32 *save_area_used_size);
 
+       void    (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd, uint32_t *ctl_stack_size);
+
+       void    (*checkpoint_mqd)(struct mqd_manager *mm,
+                                 void *mqd,
+                                 void *mqd_dst,
+                                 void *ctl_stack_dst);
+
+       void    (*restore_mqd)(struct mqd_manager *mm, void **mqd,
+                               struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                               struct queue_properties *p,
+                               const void *mqd_src,
+                               const void *ctl_stack_src,
+                               const u32 ctl_stack_size);
+
 #if defined(CONFIG_DEBUG_FS)
        int     (*debugfs_show_mqd)(struct seq_file *m, void *data);
 #endif
@@ -122,4 +136,31 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
                const uint32_t *cu_mask, uint32_t cu_mask_count,
                uint32_t *se_mask);
 
+int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+               uint32_t pipe_id, uint32_t queue_id,
+               struct queue_properties *p, struct mm_struct *mms);
+
+int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
+               enum kfd_preempt_type type, unsigned int timeout,
+               uint32_t pipe_id, uint32_t queue_id);
+
+void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
+               struct kfd_mem_obj *mqd_mem_obj);
+
+bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
+                uint64_t queue_address, uint32_t pipe_id,
+                uint32_t queue_id);
+
+int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+               uint32_t pipe_id, uint32_t queue_id,
+               struct queue_properties *p, struct mm_struct *mms);
+
+int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+               enum kfd_preempt_type type, unsigned int timeout,
+               uint32_t pipe_id, uint32_t queue_id);
+
+bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+               uint64_t queue_address, uint32_t pipe_id,
+               uint32_t queue_id);
+
 #endif /* KFD_MQD_MANAGER_H_ */
index e9a8e21e144ed629bc97b85383e99bf02377518c..06aae5df6a379dbece38f3d4bb66b85d3b755697 100644 (file)
@@ -156,13 +156,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
        mm->update_mqd(mm, m, q, NULL);
 }
 
-static void free_mqd(struct mqd_manager *mm, void *mqd,
-                       struct kfd_mem_obj *mqd_mem_obj)
-{
-       kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
-
 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
                    uint32_t queue_id, struct queue_properties *p,
                    struct mm_struct *mms)
@@ -176,15 +169,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
                                          wptr_shift, wptr_mask, mms);
 }
 
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
-                        uint32_t pipe_id, uint32_t queue_id,
-                        struct queue_properties *p, struct mm_struct *mms)
-{
-       return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
-                                              (uint32_t __user *)p->write_ptr,
-                                              mms);
-}
-
 static void __update_mqd(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q, struct mqd_update_info *minfo,
                        unsigned int atc_bit)
@@ -271,42 +255,75 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        q->is_active = QUEUE_IS_ACTIVE(*q);
 }
 
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
-                       enum kfd_preempt_type type,
-                       unsigned int timeout, uint32_t pipe_id,
-                       uint32_t queue_id)
+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
 {
-       return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
-                                       pipe_id, queue_id);
+       struct cik_mqd *m;
+
+       m = get_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct cik_mqd));
 }
 
-/*
- * preempt type here is ignored because there is only one way
- * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
-                               enum kfd_preempt_type type,
-                               unsigned int timeout, uint32_t pipe_id,
-                               uint32_t queue_id)
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *qp,
+                       const void *mqd_src,
+                       const void *ctl_stack_src, const u32 ctl_stack_size)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
+       uint64_t addr;
+       struct cik_mqd *m;
+
+       m = (struct cik_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(qp->doorbell_off);
+
+       pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+                       m->cp_hqd_pq_doorbell_control);
+
+       qp->is_active = 0;
 }
 
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
-                       uint64_t queue_address, uint32_t pipe_id,
-                       uint32_t queue_id)
+static void checkpoint_mqd_sdma(struct mqd_manager *mm,
+                               void *mqd,
+                               void *mqd_dst,
+                               void *ctl_stack_dst)
 {
+       struct cik_sdma_rlc_registers *m;
 
-       return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
-                                       pipe_id, queue_id);
+       m = get_sdma_mqd(mqd);
 
+       memcpy(mqd_dst, m, sizeof(struct cik_sdma_rlc_registers));
 }
 
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
-                       uint64_t queue_address, uint32_t pipe_id,
-                       uint32_t queue_id)
+static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
+                               struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                               struct queue_properties *qp,
+                               const void *mqd_src,
+                               const void *ctl_stack_src, const u32 ctl_stack_size)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
+       uint64_t addr;
+       struct cik_sdma_rlc_registers *m;
+
+       m = (struct cik_sdma_rlc_registers *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       m->sdma_rlc_doorbell =
+               qp->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       qp->is_active = 0;
 }
 
 /*
@@ -389,11 +406,13 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_CP:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
+               mqd->checkpoint_mqd = checkpoint_mqd;
+               mqd->restore_mqd = restore_mqd;
                mqd->mqd_size = sizeof(struct cik_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -405,8 +424,8 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct cik_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -416,11 +435,11 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_DIQ:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd_hiq;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct cik_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -430,10 +449,12 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
                mqd->allocate_mqd = allocate_sdma_mqd;
                mqd->init_mqd = init_mqd_sdma;
                mqd->free_mqd = free_mqd_hiq_sdma;
-               mqd->load_mqd = load_mqd_sdma;
+               mqd->load_mqd = kfd_load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
-               mqd->destroy_mqd = destroy_mqd_sdma;
-               mqd->is_occupied = is_occupied_sdma;
+               mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+               mqd->is_occupied = kfd_is_occupied_sdma;
+               mqd->checkpoint_mqd = checkpoint_mqd_sdma;
+               mqd->restore_mqd = restore_mqd_sdma;
                mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
index d74d8a6ac27ae0631a24db1b4d9108fb8d0c788f..88228009ded46be80624ee501d479ebb204325dd 100644 (file)
@@ -154,14 +154,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
        return r;
 }
 
-static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
-                           uint32_t pipe_id, uint32_t queue_id,
-                           struct queue_properties *p, struct mm_struct *mms)
-{
-       return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
-                                             queue_id, p->doorbell_off);
-}
-
 static void update_mqd(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q,
                        struct mqd_update_info *minfo)
@@ -233,31 +225,6 @@ static uint32_t read_doorbell_id(void *mqd)
        return m->queue_doorbell_id0;
 }
 
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
-                      enum kfd_preempt_type type,
-                      unsigned int timeout, uint32_t pipe_id,
-                      uint32_t queue_id)
-{
-       return mm->dev->kfd2kgd->hqd_destroy
-               (mm->dev->adev, mqd, type, timeout,
-                pipe_id, queue_id);
-}
-
-static void free_mqd(struct mqd_manager *mm, void *mqd,
-                       struct kfd_mem_obj *mqd_mem_obj)
-{
-       kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
-                       uint64_t queue_address, uint32_t pipe_id,
-                       uint32_t queue_id)
-{
-       return mm->dev->kfd2kgd->hqd_is_occupied(
-               mm->dev->adev, queue_address,
-               pipe_id, queue_id);
-}
-
 static int get_wave_state(struct mqd_manager *mm, void *mqd,
                          void __user *ctl_stack,
                          u32 *ctl_stack_used_size,
@@ -285,6 +252,42 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
        return 0;
 }
 
+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
+{
+       struct v10_compute_mqd *m;
+
+       m = get_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct v10_compute_mqd));
+}
+
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *qp,
+                       const void *mqd_src,
+                       const void *ctl_stack_src, const u32 ctl_stack_size)
+{
+       uint64_t addr;
+       struct v10_compute_mqd *m;
+
+       m = (struct v10_compute_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       m->cp_hqd_pq_doorbell_control =
+               qp->doorbell_off <<
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+       pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+                       m->cp_hqd_pq_doorbell_control);
+
+       qp->is_active = 0;
+
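Checkpoint and restore are deliberately asymmetric: checkpoint_mqd() snapshots the raw MQD, while restore_mqd() re-derives the doorbell control word from the new queue's doorbell_off instead of trusting the saved one, and leaves the queue inactive until it is explicitly mapped again. A sketch of the round trip (hypothetical buffer handling; this v10 checkpoint_mqd() ignores the ctl_stack argument, so NULL/0 are passed for it):

    /* Sketch: save a compute MQD, then reload it into a fresh
     * backing object after the queue is re-created.
     */
    void *snap = kvzalloc(mqd_mgr->mqd_size, GFP_KERNEL);

    if (snap) {
            mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, snap, NULL);
            /* ... queue torn down; later re-created with a new mqd_mem_obj ... */
            mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
                                 &q->gart_mqd_addr, &q->properties,
                                 snap, NULL, 0);
            kvfree(snap);
    }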
 static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
@@ -316,15 +319,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
        mm->update_mqd(mm, m, q, NULL);
 }
 
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
-               uint32_t pipe_id, uint32_t queue_id,
-               struct queue_properties *p, struct mm_struct *mms)
-{
-       return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
-                                              (uint32_t __user *)p->write_ptr,
-                                              mms);
-}
-
 #define SDMA_RLC_DUMMY_DEFAULT 0xf
 
 static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -354,23 +348,41 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        q->is_active = QUEUE_IS_ACTIVE(*q);
 }
 
-/*
- *  * preempt type here is ignored because there is only one way
- *  * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
-               enum kfd_preempt_type type,
-               unsigned int timeout, uint32_t pipe_id,
-               uint32_t queue_id)
+static void checkpoint_mqd_sdma(struct mqd_manager *mm,
+                               void *mqd,
+                               void *mqd_dst,
+                               void *ctl_stack_dst)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
+       struct v10_sdma_mqd *m;
+
+       m = get_sdma_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct v10_sdma_mqd));
 }
 
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
-               uint64_t queue_address, uint32_t pipe_id,
-               uint32_t queue_id)
+static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
+                            struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                            struct queue_properties *qp,
+                            const void *mqd_src,
+                            const void *ctl_stack_src,
+                            const u32 ctl_stack_size)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
+       uint64_t addr;
+       struct v10_sdma_mqd *m;
+
+       m = (struct v10_sdma_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       m->sdmax_rlcx_doorbell_offset =
+               qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       qp->is_active = 0;
 }
 
 #if defined(CONFIG_DEBUG_FS)
@@ -410,13 +422,15 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
                pr_debug("%s@%i\n", __func__, __LINE__);
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
                mqd->get_wave_state = get_wave_state;
+               mqd->checkpoint_mqd = checkpoint_mqd;
+               mqd->restore_mqd = restore_mqd;
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
 #endif
@@ -427,10 +441,10 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
                mqd->allocate_mqd = allocate_hiq_mqd;
                mqd->init_mqd = init_mqd_hiq;
                mqd->free_mqd = free_mqd_hiq_sdma;
-               mqd->load_mqd = hiq_load_mqd_kiq;
+               mqd->load_mqd = kfd_hiq_load_mqd_kiq;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -441,11 +455,11 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_DIQ:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd_hiq;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v10_compute_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -456,10 +470,12 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
                mqd->allocate_mqd = allocate_sdma_mqd;
                mqd->init_mqd = init_mqd_sdma;
                mqd->free_mqd = free_mqd_hiq_sdma;
-               mqd->load_mqd = load_mqd_sdma;
+               mqd->load_mqd = kfd_load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
-               mqd->destroy_mqd = destroy_mqd_sdma;
-               mqd->is_occupied = is_occupied_sdma;
+               mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+               mqd->is_occupied = kfd_is_occupied_sdma;
+               mqd->checkpoint_mqd = checkpoint_mqd_sdma;
+               mqd->restore_mqd = restore_mqd_sdma;
                mqd->mqd_size = sizeof(struct v10_sdma_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
index 326eb2285029865b81de61d422393e6f64093587..dff58ee53557e78c168d5daea66a697fbaeac40f 100644 (file)
@@ -204,14 +204,6 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
                                          wptr_shift, 0, mms);
 }
 
-static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
-                           uint32_t pipe_id, uint32_t queue_id,
-                           struct queue_properties *p, struct mm_struct *mms)
-{
-       return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
-                                             queue_id, p->doorbell_off);
-}
-
 static void update_mqd(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q,
                        struct mqd_update_info *minfo)
@@ -285,38 +277,6 @@ static uint32_t read_doorbell_id(void *mqd)
        return m->queue_doorbell_id0;
 }
 
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
-                       enum kfd_preempt_type type,
-                       unsigned int timeout, uint32_t pipe_id,
-                       uint32_t queue_id)
-{
-       return mm->dev->kfd2kgd->hqd_destroy
-               (mm->dev->adev, mqd, type, timeout,
-               pipe_id, queue_id);
-}
-
-static void free_mqd(struct mqd_manager *mm, void *mqd,
-                       struct kfd_mem_obj *mqd_mem_obj)
-{
-       struct kfd_dev *kfd = mm->dev;
-
-       if (mqd_mem_obj->gtt_mem) {
-               amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem);
-               kfree(mqd_mem_obj);
-       } else {
-               kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-       }
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
-                       uint64_t queue_address, uint32_t pipe_id,
-                       uint32_t queue_id)
-{
-       return mm->dev->kfd2kgd->hqd_is_occupied(
-               mm->dev->adev, queue_address,
-               pipe_id, queue_id);
-}
-
 static int get_wave_state(struct mqd_manager *mm, void *mqd,
                          void __user *ctl_stack,
                          u32 *ctl_stack_used_size,
@@ -340,6 +300,57 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
        return 0;
 }
 
+static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
+{
+       struct v9_mqd *m = get_mqd(mqd);
+
+       *ctl_stack_size = m->cp_hqd_cntl_stack_size;
+}
+
+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
+{
+       struct v9_mqd *m;
+       /* Control stack is located one page after MQD. */
+       void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
+
+       m = get_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct v9_mqd));
+       memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
+}
+
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *qp,
+                       const void *mqd_src,
+                       const void *ctl_stack_src, u32 ctl_stack_size)
+{
+       uint64_t addr;
+       struct v9_mqd *m;
+       void *ctl_stack;
+
+       m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       /* Control stack is located one page after MQD. */
+       ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE);
+       memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);
+
+       m->cp_hqd_pq_doorbell_control =
+               qp->doorbell_off <<
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+       pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+                               m->cp_hqd_pq_doorbell_control);
+
+       qp->is_active = 0;
+}
+
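On v9 the control stack lives one page after the MQD and is checkpointed with it, so a caller sizes the destination with get_checkpoint_info() before dumping. A minimal sketch under that assumption (the helper itself is hypothetical):

static void *example_checkpoint_v9_queue(struct mqd_manager *mm, void *mqd,
					 u32 *out_size)
{
	u32 ctl_stack_size;
	void *dst;

	/* ask how much control-stack data will follow the MQD image */
	mm->get_checkpoint_info(mm, mqd, &ctl_stack_size);
	*out_size = mm->mqd_size + ctl_stack_size;

	dst = kvzalloc(*out_size, GFP_KERNEL);
	if (!dst)
		return NULL;

	/* MQD image first, control stack immediately after it */
	mm->checkpoint_mqd(mm, mqd, dst, (uint8_t *)dst + mm->mqd_size);
	return dst;
}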
 static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
@@ -371,15 +382,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
        mm->update_mqd(mm, m, q, NULL);
 }
 
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
-               uint32_t pipe_id, uint32_t queue_id,
-               struct queue_properties *p, struct mm_struct *mms)
-{
-       return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
-                                              (uint32_t __user *)p->write_ptr,
-                                              mms);
-}
-
 #define SDMA_RLC_DUMMY_DEFAULT 0xf
 
 static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -409,23 +411,40 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        q->is_active = QUEUE_IS_ACTIVE(*q);
 }
 
-/*
- *  * preempt type here is ignored because there is only one way
- *  * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
-               enum kfd_preempt_type type,
-               unsigned int timeout, uint32_t pipe_id,
-               uint32_t queue_id)
+static void checkpoint_mqd_sdma(struct mqd_manager *mm,
+                               void *mqd,
+                               void *mqd_dst,
+                               void *ctl_stack_dst)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
+       struct v9_sdma_mqd *m;
+
+       m = get_sdma_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct v9_sdma_mqd));
 }
 
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
-               uint64_t queue_address, uint32_t pipe_id,
-               uint32_t queue_id)
+static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
+                            struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                            struct queue_properties *qp,
+                            const void *mqd_src,
+                            const void *ctl_stack_src, const u32 ctl_stack_size)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
+       uint64_t addr;
+       struct v9_sdma_mqd *m;
+
+       m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       m->sdmax_rlcx_doorbell_offset =
+               qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       qp->is_active = 0;
 }
 
 #if defined(CONFIG_DEBUG_FS)
@@ -464,12 +483,15 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_CP:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->get_wave_state = get_wave_state;
+               mqd->get_checkpoint_info = get_checkpoint_info;
+               mqd->checkpoint_mqd = checkpoint_mqd;
+               mqd->restore_mqd = restore_mqd;
                mqd->mqd_size = sizeof(struct v9_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -479,10 +501,10 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                mqd->allocate_mqd = allocate_hiq_mqd;
                mqd->init_mqd = init_mqd_hiq;
                mqd->free_mqd = free_mqd_hiq_sdma;
-               mqd->load_mqd = hiq_load_mqd_kiq;
+               mqd->load_mqd = kfd_hiq_load_mqd_kiq;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v9_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -492,11 +514,11 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_DIQ:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd_hiq;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct v9_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -506,10 +528,12 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                mqd->allocate_mqd = allocate_sdma_mqd;
                mqd->init_mqd = init_mqd_sdma;
                mqd->free_mqd = free_mqd_hiq_sdma;
-               mqd->load_mqd = load_mqd_sdma;
+               mqd->load_mqd = kfd_load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
-               mqd->destroy_mqd = destroy_mqd_sdma;
-               mqd->is_occupied = is_occupied_sdma;
+               mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+               mqd->is_occupied = kfd_is_occupied_sdma;
+               mqd->checkpoint_mqd = checkpoint_mqd_sdma;
+               mqd->restore_mqd = restore_mqd_sdma;
                mqd->mqd_size = sizeof(struct v9_sdma_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
index d456e950ce1d9241b4426f1f7f74e3ed29ef0aa0..86fc5decacf04a6d348726eec4c6e4ceee6814e3 100644 (file)
@@ -259,31 +259,6 @@ static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
        __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
 }
 
-static int destroy_mqd(struct mqd_manager *mm, void *mqd,
-                       enum kfd_preempt_type type,
-                       unsigned int timeout, uint32_t pipe_id,
-                       uint32_t queue_id)
-{
-       return mm->dev->kfd2kgd->hqd_destroy
-               (mm->dev->adev, mqd, type, timeout,
-               pipe_id, queue_id);
-}
-
-static void free_mqd(struct mqd_manager *mm, void *mqd,
-                       struct kfd_mem_obj *mqd_mem_obj)
-{
-       kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
-}
-
-static bool is_occupied(struct mqd_manager *mm, void *mqd,
-                       uint64_t queue_address, uint32_t pipe_id,
-                       uint32_t queue_id)
-{
-       return mm->dev->kfd2kgd->hqd_is_occupied(
-               mm->dev->adev, queue_address,
-               pipe_id, queue_id);
-}
-
 static int get_wave_state(struct mqd_manager *mm, void *mqd,
                          void __user *ctl_stack,
                          u32 *ctl_stack_used_size,
@@ -306,6 +281,48 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
        return 0;
 }
 
+static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
+{
+       /* Control stack is stored in user mode */
+       *ctl_stack_size = 0;
+}
+
+static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
+{
+       struct vi_mqd *m;
+
+       m = get_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct vi_mqd));
+}
+
+static void restore_mqd(struct mqd_manager *mm, void **mqd,
+                       struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                       struct queue_properties *qp,
+                       const void *mqd_src,
+                       const void *ctl_stack_src, const u32 ctl_stack_size)
+{
+       uint64_t addr;
+       struct vi_mqd *m;
+
+       m = (struct vi_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       m->cp_hqd_pq_doorbell_control =
+               qp->doorbell_off <<
+                       CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+       pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+                       m->cp_hqd_pq_doorbell_control);
+
+       qp->is_active = 0;
+}
+
 static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
                        struct queue_properties *q)
@@ -343,15 +360,6 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
        mm->update_mqd(mm, m, q, NULL);
 }
 
-static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
-               uint32_t pipe_id, uint32_t queue_id,
-               struct queue_properties *p, struct mm_struct *mms)
-{
-       return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
-                                              (uint32_t __user *)p->write_ptr,
-                                              mms);
-}
-
 static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
                        struct queue_properties *q,
                        struct mqd_update_info *minfo)
@@ -380,27 +388,45 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
        q->is_active = QUEUE_IS_ACTIVE(*q);
 }
 
-/*
- *  * preempt type here is ignored because there is only one way
- *  * to preempt sdma queue
- */
-static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
-               enum kfd_preempt_type type,
-               unsigned int timeout, uint32_t pipe_id,
-               uint32_t queue_id)
+static void checkpoint_mqd_sdma(struct mqd_manager *mm,
+                               void *mqd,
+                               void *mqd_dst,
+                               void *ctl_stack_dst)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
+       struct vi_sdma_mqd *m;
+
+       m = get_sdma_mqd(mqd);
+
+       memcpy(mqd_dst, m, sizeof(struct vi_sdma_mqd));
 }
 
-static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
-               uint64_t queue_address, uint32_t pipe_id,
-               uint32_t queue_id)
+static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
+                            struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                            struct queue_properties *qp,
+                            const void *mqd_src,
+                            const void *ctl_stack_src, const u32 ctl_stack_size)
 {
-       return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
+       uint64_t addr;
+       struct vi_sdma_mqd *m;
+
+       m = (struct vi_sdma_mqd *) mqd_mem_obj->cpu_ptr;
+       addr = mqd_mem_obj->gpu_addr;
+
+       memcpy(m, mqd_src, sizeof(*m));
+
+       m->sdmax_rlcx_doorbell =
+               qp->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
+
+       *mqd = m;
+       if (gart_addr)
+               *gart_addr = addr;
+
+       qp->is_active = 0;
 }
 
 #if defined(CONFIG_DEBUG_FS)
 
 static int debugfs_show_mqd(struct seq_file *m, void *data)
 {
        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
@@ -435,12 +461,15 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_CP:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->get_wave_state = get_wave_state;
+               mqd->get_checkpoint_info = get_checkpoint_info;
+               mqd->checkpoint_mqd = checkpoint_mqd;
+               mqd->restore_mqd = restore_mqd;
                mqd->mqd_size = sizeof(struct vi_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -452,8 +481,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
                mqd->free_mqd = free_mqd_hiq_sdma;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct vi_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -463,11 +492,11 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
        case KFD_MQD_TYPE_DIQ:
                mqd->allocate_mqd = allocate_mqd;
                mqd->init_mqd = init_mqd_hiq;
-               mqd->free_mqd = free_mqd;
+               mqd->free_mqd = kfd_free_mqd_cp;
                mqd->load_mqd = load_mqd;
                mqd->update_mqd = update_mqd_hiq;
-               mqd->destroy_mqd = destroy_mqd;
-               mqd->is_occupied = is_occupied;
+               mqd->destroy_mqd = kfd_destroy_mqd_cp;
+               mqd->is_occupied = kfd_is_occupied_cp;
                mqd->mqd_size = sizeof(struct vi_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd;
@@ -477,10 +506,12 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
                mqd->allocate_mqd = allocate_sdma_mqd;
                mqd->init_mqd = init_mqd_sdma;
                mqd->free_mqd = free_mqd_hiq_sdma;
-               mqd->load_mqd = load_mqd_sdma;
+               mqd->load_mqd = kfd_load_mqd_sdma;
                mqd->update_mqd = update_mqd_sdma;
-               mqd->destroy_mqd = destroy_mqd_sdma;
-               mqd->is_occupied = is_occupied_sdma;
+               mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+               mqd->is_occupied = kfd_is_occupied_sdma;
+               mqd->checkpoint_mqd = checkpoint_mqd_sdma;
+               mqd->restore_mqd = restore_mqd_sdma;
                mqd->mqd_size = sizeof(struct vi_sdma_mqd);
 #if defined(CONFIG_DEBUG_FS)
                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
index ea68f3b3a4e9cbb884becd4c799bd120f39cf3a6..b6790a637f5c8f5ec211c943b19e4ebc3025332c 100644 (file)
  */
 #define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
 
-
+/**
+ * enum kfd_ioctl_flags - KFD ioctl flags
+ * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
+ * userspace can use a given ioctl.
+ */
+enum kfd_ioctl_flags {
+       /*
+        * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
+        * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
+        * perform privileged operations and load arbitrary data into MQDs and
+        * eventually HQD registers when the queue is mapped by HWS. To
+        * prevent this, such ioctls must pass additional security checks.
+        *
+        * Only callers with the CAP_CHECKPOINT_RESTORE capability may use
+        * these ioctls.
+        *
+        * Note: Since earlier versions of Docker do not support
+        * CAP_CHECKPOINT_RESTORE, ioctls from callers with CAP_SYS_ADMIN
+        * are allowed as well.
+        */
+       KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
+};
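A hedged sketch of the gate an ioctl dispatcher could apply when this flag is set; the helper is hypothetical, while CAP_CHECKPOINT_RESTORE and CAP_SYS_ADMIN are the capabilities named in the comment:

#include <linux/capability.h>

/* Illustration: flagged ioctls require checkpoint/restore privileges;
 * CAP_SYS_ADMIN is accepted as well, for runtimes that predate
 * CAP_CHECKPOINT_RESTORE support.
 */
static bool example_ioctl_permitted(unsigned int ioctl_flags)
{
	if (!(ioctl_flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE))
		return true;

	return capable(CAP_CHECKPOINT_RESTORE) || capable(CAP_SYS_ADMIN);
}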
 /*
  * Kernel module parameter to specify maximum number of supported queues per
  * device
@@ -281,9 +300,6 @@ struct kfd_dev {
         */
        bool interrupts_active;
 
-       /* Debug manager */
-       struct kfd_dbgmgr *dbgmgr;
-
        /* Firmware versions */
        uint16_t mec_fw_version;
        uint16_t mec2_fw_version;
@@ -442,6 +458,7 @@ enum KFD_QUEUE_PRIORITY {
  * it's user mode or kernel mode queue.
  *
  */
 struct queue_properties {
        enum kfd_queue_type type;
        enum kfd_queue_format format;
@@ -754,6 +771,12 @@ struct kfd_process_device {
        uint64_t faults;
        uint64_t page_in;
        uint64_t page_out;
+       /*
+        * If this process has been checkpointed before, the user
+        * application will keep referring to this device by the original
+        * gpu_id it had on the node where the checkpoint was taken.
+        */
+       uint32_t user_gpu_id;
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -764,6 +787,7 @@ struct svm_range_list {
        struct list_head                list;
        struct work_struct              deferred_list_work;
        struct list_head                deferred_range_list;
+       struct list_head                criu_svm_metadata_list;
        spinlock_t                      deferred_list_lock;
        atomic_t                        evicted_ranges;
        atomic_t                        drain_pagefaults;
@@ -858,6 +882,8 @@ struct kfd_process {
        bool xnack_enabled;
 
        atomic_t poison;
+       /* Queues are in a paused state because a CRIU checkpoint is in progress */
+       bool queues_paused;
 };
 
 #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
@@ -911,6 +937,11 @@ int kfd_process_restore_queues(struct kfd_process *p);
 void kfd_suspend_all_processes(void);
 int kfd_resume_all_processes(void);
 
+struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
+                                                        uint32_t gpu_id);
+
+int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);
+
 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file);
 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
@@ -932,6 +963,7 @@ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
                                        int handle);
 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                        int handle);
+struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);
 
 /* PASIDs */
 int kfd_pasid_init(void);
@@ -1006,6 +1038,116 @@ void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
                                  uint64_t tba_addr,
                                  uint64_t tma_addr);
 
+/* CRIU */
+/*
+ * KFD_CRIU_PRIV_VERSION must be incremented each time any of the following
+ * CRIU private structures is changed:
+ * kfd_criu_process_priv_data
+ * kfd_criu_device_priv_data
+ * kfd_criu_bo_priv_data
+ * kfd_criu_queue_priv_data
+ * kfd_criu_event_priv_data
+ * kfd_criu_svm_range_priv_data
+ */
+
+#define KFD_CRIU_PRIV_VERSION 1
+
+struct kfd_criu_process_priv_data {
+       uint32_t version;
+       uint32_t xnack_mode;
+};
+
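A restore path would refuse an image produced under a different private-data layout; a brief sketch of the version check implied by the comment above (helper name assumed):

static int example_check_priv_version(const struct kfd_criu_process_priv_data *pd)
{
	/* reject private data whose layout differs from this kernel's */
	if (pd->version != KFD_CRIU_PRIV_VERSION) {
		pr_err("Invalid CRIU private data version %u (expected %u)\n",
		       pd->version, KFD_CRIU_PRIV_VERSION);
		return -EINVAL;
	}
	return 0;
}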
+struct kfd_criu_device_priv_data {
+       /* For future use */
+       uint64_t reserved;
+};
+
+struct kfd_criu_bo_priv_data {
+       uint64_t user_addr;
+       uint32_t idr_handle;
+       uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
+};
+
+/*
+ * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data
+ * and kfd_criu_svm_range_priv_data are the object type.
+ */
+enum kfd_criu_object_type {
+       KFD_CRIU_OBJECT_TYPE_QUEUE,
+       KFD_CRIU_OBJECT_TYPE_EVENT,
+       KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
+};
+
+struct kfd_criu_svm_range_priv_data {
+       uint32_t object_type;
+       uint64_t start_addr;
+       uint64_t size;
+       /* Variable length array of attributes */
+       struct kfd_ioctl_svm_attribute attrs[];
+};
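Because the SVM-range record ends in a flexible attribute array, its serialized size depends on the attribute count; a small sketch of the arithmetic (nattrs is an assumed input):

static size_t example_svm_record_size(uint32_t nattrs)
{
	/* fixed header plus nattrs trailing attribute entries */
	return sizeof(struct kfd_criu_svm_range_priv_data) +
	       nattrs * sizeof(struct kfd_ioctl_svm_attribute);
}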
+
+struct kfd_criu_queue_priv_data {
+       uint32_t object_type;
+       uint64_t q_address;
+       uint64_t q_size;
+       uint64_t read_ptr_addr;
+       uint64_t write_ptr_addr;
+       uint64_t doorbell_off;
+       uint64_t eop_ring_buffer_address;
+       uint64_t ctx_save_restore_area_address;
+       uint32_t gpu_id;
+       uint32_t type;
+       uint32_t format;
+       uint32_t q_id;
+       uint32_t priority;
+       uint32_t q_percent;
+       uint32_t doorbell_id;
+       uint32_t is_gws;
+       uint32_t sdma_id;
+       uint32_t eop_ring_buffer_size;
+       uint32_t ctx_save_restore_area_size;
+       uint32_t ctl_stack_size;
+       uint32_t mqd_size;
+};
+
+struct kfd_criu_event_priv_data {
+       uint32_t object_type;
+       uint64_t user_handle;
+       uint32_t event_id;
+       uint32_t auto_reset;
+       uint32_t type;
+       uint32_t signaled;
+
+       union {
+               struct kfd_hsa_memory_exception_data memory_exception_data;
+               struct kfd_hsa_hw_exception_data hw_exception_data;
+       };
+};
+
+int kfd_process_get_queue_info(struct kfd_process *p,
+                              uint32_t *num_queues,
+                              uint64_t *priv_data_sizes);
+
+int kfd_criu_checkpoint_queues(struct kfd_process *p,
+                        uint8_t __user *user_priv_data,
+                        uint64_t *priv_data_offset);
+
+int kfd_criu_restore_queue(struct kfd_process *p,
+                          uint8_t __user *user_priv_data,
+                          uint64_t *priv_data_offset,
+                          uint64_t max_priv_data_size);
+
+int kfd_criu_checkpoint_events(struct kfd_process *p,
+                        uint8_t __user *user_priv_data,
+                        uint64_t *priv_data_offset);
+
+int kfd_criu_restore_event(struct file *devkfd,
+                          struct kfd_process *p,
+                          uint8_t __user *user_priv_data,
+                          uint64_t *priv_data_offset,
+                          uint64_t max_priv_data_size);
+/* CRIU - End */
+
 /* Queue Context Management */
 int init_queue(struct queue **q, const struct queue_properties *properties);
 void uninit_queue(struct queue *q);
@@ -1029,7 +1171,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
                                        enum kfd_queue_type type);
 void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
-int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid);
+int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
 
 /* Process Queue Manager */
 struct process_queue_node {
@@ -1047,6 +1189,9 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                            struct file *f,
                            struct queue_properties *properties,
                            unsigned int *qid,
+                           const struct kfd_criu_queue_priv_data *q_data,
+                           const void *restore_mqd,
+                           const void *restore_ctl_stack,
                            uint32_t *p_doorbell_offset_in_process);
 int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
 int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
@@ -1069,6 +1214,10 @@ int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
                              uint64_t fence_value,
                              unsigned int timeout_ms);
 
+int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
+                                 unsigned int qid,
+                                 u32 *mqd_size,
+                                 u32 *ctl_stack_size);
 /* Packet Manager */
 
 #define KFD_FENCE_COMPLETED (100)
@@ -1160,12 +1309,14 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
 void kfd_signal_hw_exception_event(u32 pasid);
 int kfd_set_event(struct kfd_process *p, uint32_t event_id);
 int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
-int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
-                      uint64_t size);
+int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);
+
 int kfd_event_create(struct file *devkfd, struct kfd_process *p,
                     uint32_t event_type, bool auto_reset, uint32_t node_id,
                     uint32_t *event_id, uint32_t *event_trigger_data,
                     uint64_t *event_page_offset, uint32_t *event_slot_index);
+
+int kfd_get_num_events(struct kfd_process *p);
 int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
 
 void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
@@ -1177,8 +1328,6 @@ void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
 
 void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
 
-int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
-
 bool kfd_is_locked(void);
 
 /* Compute profile */
index d1145da5348f4cbe76c037eae6973d44ce0100fe..8c6a48add76e3688c51b2eb7dbead161cc2c9203 100644 (file)
@@ -40,7 +40,6 @@ struct mm_struct;
 
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
-#include "kfd_dbgmgr.h"
 #include "kfd_iommu.h"
 #include "kfd_svm.h"
 
@@ -64,7 +63,8 @@ static struct workqueue_struct *kfd_process_wq;
  */
 static struct workqueue_struct *kfd_restore_wq;
 
-static struct kfd_process *find_process(const struct task_struct *thread);
+static struct kfd_process *find_process(const struct task_struct *thread,
+                                       bool ref);
 static void kfd_process_ref_release(struct kref *ref);
 static struct kfd_process *create_process(const struct task_struct *thread);
 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
@@ -715,7 +715,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
        int err;
 
        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
-                                                pdd->drm_priv, mem, NULL, flags);
+                                                pdd->drm_priv, mem, NULL,
+                                                flags, false);
        if (err)
                goto err_alloc_mem;
 
@@ -816,7 +817,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
        mutex_lock(&kfd_processes_mutex);
 
        /* A prior open of /dev/kfd could have already created the process. */
-       process = find_process(thread);
+       process = find_process(thread, false);
        if (process) {
                pr_debug("Process already found\n");
        } else {
@@ -884,7 +885,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);
 
-       process = find_process(thread);
+       process = find_process(thread, false);
        if (!process)
                return ERR_PTR(-EINVAL);
 
@@ -903,13 +904,16 @@ static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
        return NULL;
 }
 
-static struct kfd_process *find_process(const struct task_struct *thread)
+static struct kfd_process *find_process(const struct task_struct *thread,
+                                       bool ref)
 {
        struct kfd_process *p;
        int idx;
 
        idx = srcu_read_lock(&kfd_processes_srcu);
        p = find_process_by_mm(thread->mm);
+       if (p && ref)
+               kref_get(&p->ref);
        srcu_read_unlock(&kfd_processes_srcu, idx);
 
        return p;
@@ -920,6 +924,26 @@ void kfd_unref_process(struct kfd_process *p)
        kref_put(&p->ref, kfd_process_ref_release);
 }
 
+/* This increments the process->ref counter. */
+struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
+{
+       struct task_struct *task = NULL;
+       struct kfd_process *p    = NULL;
+
+       if (!pid) {
+               task = current;
+               get_task_struct(task);
+       } else {
+               task = get_pid_task(pid, PIDTYPE_PID);
+       }
+
+       if (task) {
+               p = find_process(task, true);
+               put_task_struct(task);
+       }
+
+       return p;
+}
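Since the lookup returns with the reference count elevated, every successful call must be paired with kfd_unref_process(); a short usage sketch (the work done under the reference is a placeholder):

static int example_with_process(struct pid *pid)
{
	struct kfd_process *p = kfd_lookup_process_by_pid(pid);

	if (!p)
		return -ESRCH;

	/* ... operate on p while the reference is held ... */

	kfd_unref_process(p);
	return 0;
}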
 
 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 {
@@ -1133,7 +1157,6 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
 {
        struct kfd_process *p;
-       int i;
 
        /*
         * The kfd_process structure can not be free because the
@@ -1150,27 +1173,9 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 
        cancel_delayed_work_sync(&p->eviction_work);
        cancel_delayed_work_sync(&p->restore_work);
-       cancel_delayed_work_sync(&p->svms.restore_work);
 
        mutex_lock(&p->mutex);
 
-       /* Iterate over all process device data structures and if the
-        * pdd is in debug mode, we should first force unregistration,
-        * then we will be able to destroy the queues
-        */
-       for (i = 0; i < p->n_pdds; i++) {
-               struct kfd_dev *dev = p->pdds[i]->dev;
-
-               mutex_lock(kfd_get_dbgmgr_mutex());
-               if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
-                       if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
-                               kfd_dbgmgr_destroy(dev->dbgmgr);
-                               dev->dbgmgr = NULL;
-                       }
-               }
-               mutex_unlock(kfd_get_dbgmgr_mutex());
-       }
-
        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);
 
@@ -1360,6 +1365,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        process->mm = thread->mm;
        process->lead_thread = thread->group_leader;
        process->n_pdds = 0;
+       process->queues_paused = false;
        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();
@@ -1501,6 +1507,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->runtime_inuse = false;
        pdd->vram_usage = 0;
        pdd->sdma_past_activity_counter = 0;
+       pdd->user_gpu_id = dev->id;
        atomic64_set(&pdd->evict_duration_counter, 0);
        p->pdds[p->n_pdds++] = pdd;
 
@@ -1771,7 +1778,7 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
        int i;
 
        for (i = 0; i < p->n_pdds; i++)
-               if (p->pdds[i] && gpu_id == p->pdds[i]->dev->id)
+               if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
                        return i;
        return -EINVAL;
 }
@@ -1784,7 +1791,7 @@ kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev,
 
        for (i = 0; i < p->n_pdds; i++)
                if (p->pdds[i] && p->pdds[i]->dev->adev == adev) {
-                       *gpuid = p->pdds[i]->dev->id;
+                       *gpuid = p->pdds[i]->user_gpu_id;
                        *gpuidx = i;
                        return 0;
                }
@@ -1956,6 +1963,37 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
        }
 }
 
+struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
+{
+       int i;
+
+       if (gpu_id) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *pdd = p->pdds[i];
+
+                       if (pdd->user_gpu_id == gpu_id)
+                               return pdd;
+               }
+       }
+       return NULL;
+}
+
+int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
+{
+       int i;
+
+       if (!actual_gpu_id)
+               return 0;
+
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               if (pdd->dev->id == actual_gpu_id)
+                       return pdd->user_gpu_id;
+       }
+       return -EINVAL;
+}
+
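Together these helpers translate between the user-visible (possibly checkpoint-era) gpu_id and the id of the device actually present after restore; a hedged round-trip sketch:

static bool example_gpu_id_round_trip(struct kfd_process *p,
				      uint32_t user_gpu_id)
{
	struct kfd_process_device *pdd =
		kfd_process_device_data_by_id(p, user_gpu_id);

	if (!pdd)
		return false;

	/* mapping back from the actual device id must return the same id */
	return kfd_process_get_user_gpu_id(p, pdd->dev->id) == user_gpu_id;
}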
 #if defined(CONFIG_DEBUG_FS)
 
 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
index 5e5c84a8e1ef70ba9a2fe849c26dd25337949a39..70ea4849e6a2be88c4c72357bd27a4853dcd54b5 100644 (file)
@@ -42,6 +42,20 @@ static inline struct process_queue_node *get_queue_by_qid(
        return NULL;
 }
 
+static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
+                                   unsigned int qid)
+{
+       if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+               return -EINVAL;
+
+       if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
+               pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
 static int find_available_queue_slot(struct process_queue_manager *pqm,
                                        unsigned int *qid)
 {
@@ -193,6 +207,9 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                            struct file *f,
                            struct queue_properties *properties,
                            unsigned int *qid,
+                           const struct kfd_criu_queue_priv_data *q_data,
+                           const void *restore_mqd,
+                           const void *restore_ctl_stack,
                            uint32_t *p_doorbell_offset_in_process)
 {
        int retval;
@@ -224,7 +241,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        if (pdd->qpd.queue_count >= max_queues)
                return -ENOSPC;
 
-       retval = find_available_queue_slot(pqm, qid);
+       if (q_data) {
+               retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
+               *qid = q_data->q_id;
+       } else {
+               retval = find_available_queue_slot(pqm, qid);
+       }
+
        if (retval != 0)
                return retval;
 
@@ -252,7 +274,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                        goto err_create_queue;
                pqn->q = q;
                pqn->kq = NULL;
-               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
+               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
+                                                   restore_mqd, restore_ctl_stack);
                print_queue(q);
                break;
 
@@ -272,7 +295,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                        goto err_create_queue;
                pqn->q = q;
                pqn->kq = NULL;
-               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
+               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
+                                                   restore_mqd, restore_ctl_stack);
                print_queue(q);
                break;
        case KFD_QUEUE_TYPE_DIQ:
@@ -497,6 +521,348 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
                                                       save_area_used_size);
 }
 
+static int get_queue_data_sizes(struct kfd_process_device *pdd,
+                               struct queue *q,
+                               uint32_t *mqd_size,
+                               uint32_t *ctl_stack_size)
+{
+       int ret;
+
+       ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
+                                           q->properties.queue_id,
+                                           mqd_size,
+                                           ctl_stack_size);
+       if (ret)
+               pr_err("Failed to get queue dump info (%d)\n", ret);
+
+       return ret;
+}
+
+int kfd_process_get_queue_info(struct kfd_process *p,
+                              uint32_t *num_queues,
+                              uint64_t *priv_data_sizes)
+{
+       uint32_t extra_data_sizes = 0;
+       struct queue *q;
+       int i;
+       int ret;
+
+       *num_queues = 0;
+
+       /* Run over all PDDs of the process */
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               list_for_each_entry(q, &pdd->qpd.queues_list, list) {
+                       if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+                               q->properties.type == KFD_QUEUE_TYPE_SDMA ||
+                               q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
+                               uint32_t mqd_size, ctl_stack_size;
+
+                               *num_queues = *num_queues + 1;
+
+                               ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
+                               if (ret)
+                                       return ret;
+
+                               extra_data_sizes += mqd_size + ctl_stack_size;
+                       } else {
+                               pr_err("Unsupported queue type (%d)\n", q->properties.type);
+                               return -EOPNOTSUPP;
+                       }
+               }
+       }
+       *priv_data_sizes = extra_data_sizes +
+                               (*num_queues * sizeof(struct kfd_criu_queue_priv_data));
+
+       return 0;
+}
+
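kfd_process_get_queue_info() is the sizing half of a two-phase protocol: callers first learn how large the queue private data will be, then checkpoint into a buffer of exactly that size. A hedged sketch of the sequence (buffer validation elided; the helper is hypothetical):

static int example_two_phase_queue_checkpoint(struct kfd_process *p,
					      uint8_t __user *user_priv)
{
	uint32_t num_queues;
	uint64_t priv_size, offset = 0;
	int ret;

	/* pass 1: compute the total private-data size for all queues */
	ret = kfd_process_get_queue_info(p, &num_queues, &priv_size);
	if (ret)
		return ret;

	/* the real caller would verify user_priv can hold priv_size bytes */
	return kfd_criu_checkpoint_queues(p, user_priv, &offset);
}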
+static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
+                             unsigned int qid,
+                             void *mqd,
+                             void *ctl_stack)
+{
+       struct process_queue_node *pqn;
+
+       pqn = get_queue_by_qid(pqm, qid);
+       if (!pqn) {
+               pr_debug("amdkfd: No queue %d exists for operation\n", qid);
+               return -EFAULT;
+       }
+
+       if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
+               pr_err("amdkfd: queue dumping not supported on this device\n");
+               return -EOPNOTSUPP;
+       }
+
+       return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
+                                                      pqn->q, mqd, ctl_stack);
+}
+
+static int criu_checkpoint_queue(struct kfd_process_device *pdd,
+                          struct queue *q,
+                          struct kfd_criu_queue_priv_data *q_data)
+{
+       uint8_t *mqd, *ctl_stack;
+       int ret;
+
+       mqd = (void *)(q_data + 1);
+       ctl_stack = mqd + q_data->mqd_size;
+
+       q_data->gpu_id = pdd->user_gpu_id;
+       q_data->type = q->properties.type;
+       q_data->format = q->properties.format;
+       q_data->q_id = q->properties.queue_id;
+       q_data->q_address = q->properties.queue_address;
+       q_data->q_size = q->properties.queue_size;
+       q_data->priority = q->properties.priority;
+       q_data->q_percent = q->properties.queue_percent;
+       q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
+       q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
+       q_data->doorbell_id = q->doorbell_id;
+
+       q_data->sdma_id = q->sdma_id;
+
+       q_data->eop_ring_buffer_address =
+               q->properties.eop_ring_buffer_address;
+
+       q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;
+
+       q_data->ctx_save_restore_area_address =
+               q->properties.ctx_save_restore_area_address;
+
+       q_data->ctx_save_restore_area_size =
+               q->properties.ctx_save_restore_area_size;
+
+       ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
+       if (ret) {
+               pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
+               return ret;
+       }
+
+       pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
+       return ret;
+}
+
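Each serialized queue record is the fixed header immediately followed by the MQD image and then the control stack; a small sketch of the offset arithmetic used above (helper name assumed):

/*
 * | kfd_criu_queue_priv_data | mqd (mqd_size) | ctl_stack (ctl_stack_size) |
 */
static void example_queue_record_layout(struct kfd_criu_queue_priv_data *q_data,
					uint8_t **mqd, uint8_t **ctl_stack)
{
	*mqd = (uint8_t *)(q_data + 1);		/* right after the header */
	*ctl_stack = *mqd + q_data->mqd_size;	/* right after the MQD */
}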
+static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
+                                  uint8_t __user *user_priv,
+                                  unsigned int *q_index,
+                                  uint64_t *queues_priv_data_offset)
+{
+       unsigned int q_private_data_size = 0;
+       uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
+       struct queue *q;
+       int ret = 0;
+
+       list_for_each_entry(q, &pdd->qpd.queues_list, list) {
+               struct kfd_criu_queue_priv_data *q_data;
+               uint64_t q_data_size;
+               uint32_t mqd_size;
+               uint32_t ctl_stack_size;
+
+               if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
+                       q->properties.type != KFD_QUEUE_TYPE_SDMA &&
+                       q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
+
+                       pr_err("Unsupported queue type (%d)\n", q->properties.type);
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
+               if (ret)
+                       break;
+
+               q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;
+
+               /* Increase local buffer space if needed */
+               if (q_private_data_size < q_data_size) {
+                       kfree(q_private_data);
+
+                       q_private_data = kzalloc(q_data_size, GFP_KERNEL);
+                       if (!q_private_data) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       q_private_data_size = q_data_size;
+               }
+
+               q_data = (struct kfd_criu_queue_priv_data *)q_private_data;
+
+               /* data stored in this order: priv_data, mqd, ctl_stack */
+               q_data->mqd_size = mqd_size;
+               q_data->ctl_stack_size = ctl_stack_size;
+
+               ret = criu_checkpoint_queue(pdd, q, q_data);
+               if (ret)
+                       break;
+
+               q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;
+
+               ret = copy_to_user(user_priv + *queues_priv_data_offset,
+                               q_data, q_data_size);
+               if (ret) {
+                       ret = -EFAULT;
+                       break;
+               }
+               *queues_priv_data_offset += q_data_size;
+               *q_index = *q_index + 1;
+       }
+
+       kfree(q_private_data);
+
+       return ret;
+}
+
+int kfd_criu_checkpoint_queues(struct kfd_process *p,
+                        uint8_t __user *user_priv_data,
+                        uint64_t *priv_data_offset)
+{
+       int ret = 0, pdd_index, q_index = 0;
+
+       for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
+               struct kfd_process_device *pdd = p->pdds[pdd_index];
+
+               /*
+                * criu_checkpoint_queues_device() copies data to user space
+                * and updates q_index and queues_priv_data_offset.
+                */
+               ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
+                                             priv_data_offset);
+
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static void set_queue_properties_from_criu(struct queue_properties *qp,
+                                         struct kfd_criu_queue_priv_data *q_data)
+{
+       qp->is_interop = false;
+       qp->is_gws = q_data->is_gws;
+       qp->queue_percent = q_data->q_percent;
+       qp->priority = q_data->priority;
+       qp->queue_address = q_data->q_address;
+       qp->queue_size = q_data->q_size;
+       qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
+       qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
+       qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
+       qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
+       qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
+       qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
+       qp->ctl_stack_size = q_data->ctl_stack_size;
+       qp->type = q_data->type;
+       qp->format = q_data->format;
+}
+
+int kfd_criu_restore_queue(struct kfd_process *p,
+                          uint8_t __user *user_priv_ptr,
+                          uint64_t *priv_data_offset,
+                          uint64_t max_priv_data_size)
+{
+       uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
+       struct kfd_criu_queue_priv_data *q_data;
+       struct kfd_process_device *pdd;
+       uint64_t q_extra_data_size;
+       struct queue_properties qp;
+       unsigned int queue_id;
+       int ret = 0;
+
+       if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
+               return -EINVAL;
+
+       q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
+       if (!q_data)
+               return -ENOMEM;
+
+       ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
+       if (ret) {
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       *priv_data_offset += sizeof(*q_data);
+       q_extra_data_size = q_data->ctl_stack_size + q_data->mqd_size;
+
+       if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
+       if (!q_extra_data) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
+       if (ret) {
+               ret = -EFAULT;
+               goto exit;
+       }
+
+       *priv_data_offset += q_extra_data_size;
+
+       pdd = kfd_process_device_data_by_id(p, q_data->gpu_id);
+       if (!pdd) {
+               pr_err("Failed to get pdd\n");
+               ret = -EINVAL;
+               goto exit;
+       }
+       /* data stored in this order: mqd, ctl_stack */
+       mqd = q_extra_data;
+       ctl_stack = mqd + q_data->mqd_size;
+
+       memset(&qp, 0, sizeof(qp));
+       set_queue_properties_from_criu(&qp, q_data);
+
+       print_queue_properties(&qp);
+
+       ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack,
+                               NULL);
+       if (ret) {
+               pr_err("Failed to create new queue err:%d\n", ret);
+               ret = -EINVAL;
+       }
+
+exit:
+       if (ret)
+               pr_err("Failed to create queue (%d)\n", ret);
+       else
+               pr_debug("Queue id %d was restored successfully\n", queue_id);
+
+       kfree(q_data);
+
+       return ret;
+}
+
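On restore, records are consumed sequentially while *priv_data_offset advances past each header plus its variable payload; a hedged sketch of the caller loop (num_queues and the blob come from the CRIU restore ioctl; names assumed):

static int example_restore_all_queues(struct kfd_process *p,
				      uint8_t __user *priv_data,
				      uint64_t max_priv_data_size,
				      uint32_t num_queues)
{
	uint64_t offset = 0;
	uint32_t i;
	int ret;

	for (i = 0; i < num_queues; i++) {
		/* each call bounds-checks itself against max_priv_data_size */
		ret = kfd_criu_restore_queue(p, priv_data, &offset,
					     max_priv_data_size);
		if (ret)
			return ret;
	}
	return 0;
}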
+int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
+                                 unsigned int qid,
+                                 uint32_t *mqd_size,
+                                 uint32_t *ctl_stack_size)
+{
+       struct process_queue_node *pqn;
+
+       pqn = get_queue_by_qid(pqm, qid);
+       if (!pqn) {
+               pr_debug("amdkfd: No queue %d exists for operation\n", qid);
+               return -EFAULT;
+       }
+
+       if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
+               pr_err("amdkfd: queue dumping not supported on this device\n");
+               return -EOPNOTSUPP;
+       }
+
+       pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
+                                                      pqn->q, mqd_size,
+                                                      ctl_stack_size);
+       return 0;
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 int pqm_debugfs_mqds(struct seq_file *m, void *data)
index deae12dc777d29a23c13f0b92aa776613d2b63c5..329a4c89f1e6769198a9f3cc1fbcae8b315e2fa9 100644 (file)
@@ -222,7 +222,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev,
 
        len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n",
                       KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask,
-                      atomic64_read(&dev->adev->smu.throttle_int_counter));
+                      amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
 
        add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len);
 }
index f2805ba74c80be6d9fd51701f82b8b0e9feb0f54..b71d47afd2437be4925d3ca2f1b4771a65cbe1df 100644 (file)
  */
 #define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING   2000
 
+struct criu_svm_metadata {
+       struct list_head list;
+       struct kfd_criu_svm_range_priv_data data;
+};
+
 static void svm_range_evict_svm_bo_worker(struct work_struct *work);
 static bool
 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
@@ -1224,19 +1229,20 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
                        if (r)
                                break;
                }
-               amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev,
-                                       p->pasid, TLB_FLUSH_HEAVYWEIGHT);
+               kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
        }
 
        return r;
 }
 
 static int
-svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                    struct svm_range *prange, unsigned long offset,
-                    unsigned long npages, bool readonly, dma_addr_t *dma_addr,
-                    struct amdgpu_device *bo_adev, struct dma_fence **fence)
+svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
+                    unsigned long offset, unsigned long npages, bool readonly,
+                    dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
+                    struct dma_fence **fence)
 {
+       struct amdgpu_device *adev = pdd->dev->adev;
+       struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
        bool table_freed = false;
        uint64_t pte_flags;
        unsigned long last_start;
@@ -1300,12 +1306,8 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        if (fence)
                *fence = dma_fence_get(vm->last_update);
 
-       if (table_freed) {
-               struct kfd_process *p;
-
-               p = container_of(prange->svms, struct kfd_process, svms);
-               amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY);
-       }
+       if (table_freed)
+               kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
 out:
        return r;
 }
@@ -1346,8 +1348,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
                        continue;
                }
 
-               r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv),
-                                        prange, offset, npages, readonly,
+               r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
                                         prange->dma_addr[gpuidx],
                                         bo_adev, wait ? &fence : NULL);
                if (r)
@@ -1643,13 +1644,14 @@ static void svm_range_restore_work(struct work_struct *work)
 
        pr_debug("restore svm ranges\n");
 
-       /* kfd_process_notifier_release destroys this worker thread. So during
-        * the lifetime of this thread, kfd_process and mm will be valid.
-        */
        p = container_of(svms, struct kfd_process, svms);
-       mm = p->mm;
-       if (!mm)
+
+       /* Keep an mm reference while svm_range_validate_and_map maps ranges */
+       mm = get_task_mm(p->lead_thread);
+       if (!mm) {
+               pr_debug("svms 0x%p process mm gone\n", svms);
                return;
+       }
 
        svm_range_list_lock_and_flush_work(svms, mm);
        mutex_lock(&svms->lock);
@@ -1703,6 +1705,7 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
        mutex_unlock(&svms->lock);
        mmap_write_unlock(mm);
+       mmput(mm);
 
        /* If validation failed, reschedule another attempt */
        if (evicted_ranges) {
@@ -1985,10 +1988,9 @@ svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
 }
 
 static void
-svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange)
+svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
+                        struct mm_struct *mm)
 {
-       struct mm_struct *mm = prange->work_item.mm;
-
        switch (prange->work_item.op) {
        case SVM_OP_NULL:
                pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
@@ -2065,40 +2067,44 @@ static void svm_range_deferred_list_work(struct work_struct *work)
        struct svm_range_list *svms;
        struct svm_range *prange;
        struct mm_struct *mm;
-       struct kfd_process *p;
 
        svms = container_of(work, struct svm_range_list, deferred_list_work);
        pr_debug("enter svms 0x%p\n", svms);
 
-       p = container_of(svms, struct kfd_process, svms);
-       /* Avoid mm is gone when inserting mmu notifier */
-       mm = get_task_mm(p->lead_thread);
-       if (!mm) {
-               pr_debug("svms 0x%p process mm gone\n", svms);
-               return;
-       }
-retry:
-       mmap_write_lock(mm);
-
-       /* Checking for the need to drain retry faults must be inside
-        * mmap write lock to serialize with munmap notifiers.
-        */
-       if (unlikely(atomic_read(&svms->drain_pagefaults))) {
-               mmap_write_unlock(mm);
-               svm_range_drain_retry_fault(svms);
-               goto retry;
-       }
-
        spin_lock(&svms->deferred_list_lock);
        while (!list_empty(&svms->deferred_range_list)) {
                prange = list_first_entry(&svms->deferred_range_list,
                                          struct svm_range, deferred_list);
-               list_del_init(&prange->deferred_list);
                spin_unlock(&svms->deferred_list_lock);
 
                pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
                         prange->start, prange->last, prange->work_item.op);
 
+               mm = prange->work_item.mm;
+retry:
+               mmap_write_lock(mm);
+
+               /* Checking for the need to drain retry faults must be inside
+                * mmap write lock to serialize with munmap notifiers.
+                */
+               if (unlikely(atomic_read(&svms->drain_pagefaults))) {
+                       mmap_write_unlock(mm);
+                       svm_range_drain_retry_fault(svms);
+                       goto retry;
+               }
+
+               /* Removal from deferred_list must happen inside the mmap write
+                * lock, for two race cases:
+                * 1. unmap_from_cpu may change work_item.op and add the range
+                *    to deferred_list again, causing a use-after-free.
+                * 2. svm_range_list_lock_and_flush_work may take the mmap write
+                *    lock and continue because deferred_list is empty, while
+                *    the deferred_list work is actually waiting for the mmap
+                *    lock.
+                */
+               spin_lock(&svms->deferred_list_lock);
+               list_del_init(&prange->deferred_list);
+               spin_unlock(&svms->deferred_list_lock);
+
                mutex_lock(&svms->lock);
                mutex_lock(&prange->migrate_mutex);
                while (!list_empty(&prange->child_list)) {
@@ -2109,19 +2115,20 @@ retry:
                        pr_debug("child prange 0x%p op %d\n", pchild,
                                 pchild->work_item.op);
                        list_del_init(&pchild->child_list);
-                       svm_range_handle_list_op(svms, pchild);
+                       svm_range_handle_list_op(svms, pchild, mm);
                }
                mutex_unlock(&prange->migrate_mutex);
 
-               svm_range_handle_list_op(svms, prange);
+               svm_range_handle_list_op(svms, prange, mm);
                mutex_unlock(&svms->lock);
+               mmap_write_unlock(mm);
+
+               /* Pairs with mmget in svm_range_add_list_work */
+               mmput(mm);
 
                spin_lock(&svms->deferred_list_lock);
        }
        spin_unlock(&svms->deferred_list_lock);
-
-       mmap_write_unlock(mm);
-       mmput(mm);
        pr_debug("exit svms 0x%p\n", svms);
 }
 
@@ -2139,6 +2146,9 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
                        prange->work_item.op = op;
        } else {
                prange->work_item.op = op;
+
+               /* Pairs with mmput in deferred_list_work */
+               mmget(mm);
                prange->work_item.mm = mm;
                list_add_tail(&prange->deferred_list,
                              &prange->svms->deferred_range_list);
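The mmget() added here pairs with the mmput() now done per item in svm_range_deferred_list_work() above. Isolated from the SVM specifics, the lifetime pattern looks roughly like this (a kernel-style sketch; struct deferred_item and the helper names are invented for illustration):

struct deferred_item {
        struct list_head list;
        struct mm_struct *mm;
};

static void queue_item(struct deferred_item *item, struct mm_struct *mm)
{
        mmget(mm);              /* keep the address space alive while queued */
        item->mm = mm;
}

static void handle_item(struct deferred_item *item)
{
        mmap_write_lock(item->mm);
        /* ... operate on the address space; it cannot go away here ... */
        mmap_write_unlock(item->mm);
        mmput(item->mm);        /* pairs with mmget() at queue time */
}
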
@@ -2830,6 +2840,8 @@ void svm_range_list_fini(struct kfd_process *p)
 
        pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
 
+       cancel_delayed_work_sync(&p->svms.restore_work);
+
        /* Ensure list work is finished before process is destroyed */
        flush_work(&p->svms.deferred_list_work);
 
@@ -2840,7 +2852,6 @@ void svm_range_list_fini(struct kfd_process *p)
        atomic_inc(&p->svms.drain_pagefaults);
        svm_range_drain_retry_fault(&p->svms);
 
-
        list_for_each_entry_safe(prange, next, &p->svms.list, list) {
                svm_range_unlink(prange);
                svm_range_remove_notifier(prange);
@@ -2865,6 +2876,7 @@ int svm_range_list_init(struct kfd_process *p)
        INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
        INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
        INIT_LIST_HEAD(&svms->deferred_range_list);
+       INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
        spin_lock_init(&svms->deferred_list_lock);
 
        for (i = 0; i < p->n_pdds; i++)
@@ -3193,10 +3205,10 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 }
 
 static int
-svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
-                  uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
+                  uint64_t start, uint64_t size, uint32_t nattr,
+                  struct kfd_ioctl_svm_attribute *attrs)
 {
-       struct mm_struct *mm = current->mm;
        struct list_head update_list;
        struct list_head insert_list;
        struct list_head remove_list;
@@ -3295,8 +3307,9 @@ out:
 }
 
 static int
-svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
-                  uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
+                  uint64_t start, uint64_t size, uint32_t nattr,
+                  struct kfd_ioctl_svm_attribute *attrs)
 {
        DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
        DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
@@ -3306,7 +3319,6 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        bool get_accessible = false;
        bool get_flags = false;
        uint64_t last = start + size - 1UL;
-       struct mm_struct *mm = current->mm;
        uint8_t granularity = 0xff;
        struct interval_tree_node *node;
        struct svm_range_list *svms;
@@ -3471,10 +3483,321 @@ fill_values:
        return 0;
 }
 
+int kfd_criu_resume_svm(struct kfd_process *p)
+{
+       struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
+       int nattr_common = 4, nattr_accessibility = 1;
+       struct criu_svm_metadata *criu_svm_md = NULL;
+       struct svm_range_list *svms = &p->svms;
+       struct criu_svm_metadata *next = NULL;
+       uint32_t set_flags = 0xffffffff;
+       int i, j, num_attrs, ret = 0;
+       uint64_t set_attr_size;
+       struct mm_struct *mm;
+
+       if (list_empty(&svms->criu_svm_metadata_list)) {
+               pr_debug("No SVM data from CRIU restore stage 2\n");
+               return ret;
+       }
+
+       mm = get_task_mm(p->lead_thread);
+       if (!mm) {
+               pr_err("failed to get mm for the target process\n");
+               return -ESRCH;
+       }
+
+       num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
+
+       i = j = 0;
+       list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
+               pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
+                        i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
+
+               for (j = 0; j < num_attrs; j++) {
+                       pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
+                                i, j, criu_svm_md->data.attrs[j].type,
+                                i, j, criu_svm_md->data.attrs[j].value);
+                       switch (criu_svm_md->data.attrs[j].type) {
+                       /* During a checkpoint operation, querying the
+                        * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute may return
+                        * KFD_IOCTL_SVM_LOCATION_UNDEFINED if the location was
+                        * not used by the range being checkpointed. Take care
+                        * not to restore that invalid value: the gpuidx derived
+                        * from it would be invalid and set_attr would
+                        * eventually fail. Instead, replace such entries with a
+                        * harmless dummy attribute such as
+                        * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
+                        */
+                       case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
+                               if (criu_svm_md->data.attrs[j].value ==
+                                   KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
+                                       criu_svm_md->data.attrs[j].type =
+                                               KFD_IOCTL_SVM_ATTR_SET_FLAGS;
+                                       criu_svm_md->data.attrs[j].value = 0;
+                               }
+                               break;
+                       case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
+                               set_flags = criu_svm_md->data.attrs[j].value;
+                               break;
+                       default:
+                               break;
+                       }
+               }
+
+               /* CLR_FLAGS is not available via get_attr during checkpoint,
+                * but it needs to be inserted before restoring the ranges, so
+                * allocate extra space for it before calling set_attr.
+                */
+               set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
+                                               (num_attrs + 1);
+               set_attr_new = krealloc(set_attr, set_attr_size, GFP_KERNEL);
+               if (!set_attr_new) {
+                       ret = -ENOMEM;
+                       goto exit;
+               }
+               set_attr = set_attr_new;
+
+               memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
+                                       sizeof(struct kfd_ioctl_svm_attribute));
+               set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
+               set_attr[num_attrs].value = ~set_flags;
+
+               ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
+                                        criu_svm_md->data.size, num_attrs + 1,
+                                        set_attr);
+               if (ret) {
+                       pr_err("CRIU: failed to set range attributes\n");
+                       goto exit;
+               }
+
+               i++;
+       }
+exit:
+       kfree(set_attr);
+       list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
+               pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
+                                               criu_svm_md->data.start_addr);
+               kfree(criu_svm_md);
+       }
+
+       mmput(mm);
+       return ret;
+}
+
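To make the SET_FLAGS/CLR_FLAGS pairing concrete (the flag value below is invented for illustration): if a range was checkpointed with KFD_IOCTL_SVM_ATTR_SET_FLAGS = 0x3, the extra attribute appended above works out to

        uint32_t set_flags = 0x3;       /* recorded at checkpoint time */
        struct kfd_ioctl_svm_attribute clr = {
                .type  = KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
                .value = ~set_flags,    /* 0xfffffffc */
        };

so the restored range ends up with exactly the checkpointed flags instead of inheriting whatever flag bits were set before the restore.
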
+int kfd_criu_restore_svm(struct kfd_process *p,
+                        uint8_t __user *user_priv_ptr,
+                        uint64_t *priv_data_offset,
+                        uint64_t max_priv_data_size)
+{
+       uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
+       int nattr_common = 4, nattr_accessibility = 1;
+       struct criu_svm_metadata *criu_svm_md = NULL;
+       struct svm_range_list *svms = &p->svms;
+       uint32_t num_devices;
+       int ret = 0;
+
+       num_devices = p->n_pdds;
+       /* Handle one SVM range object at a time. The number of GPUs is
+        * assumed to be the same on the restore node; this must be verified
+        * when the topology is evaluated earlier.
+        */
+
+       svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
+               (nattr_common + nattr_accessibility * num_devices);
+       svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
+
+       svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
+                                                               svm_attrs_size;
+
+       criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
+       if (!criu_svm_md) {
+               pr_err("failed to allocate memory to store svm metadata\n");
+               return -ENOMEM;
+       }
+       if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
+                            svm_priv_data_size);
+       if (ret) {
+               ret = -EFAULT;
+               goto exit;
+       }
+       *priv_data_offset += svm_priv_data_size;
+
+       list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
+
+       return 0;
+
+exit:
+       kfree(criu_svm_md);
+       return ret;
+}
+
+int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+                      uint64_t *svm_priv_data_size)
+{
+       uint64_t total_size, accessibility_size, common_attr_size;
+       int nattr_common = 4, nattr_accessibility = 1;
+       int num_devices = p->n_pdds;
+       struct svm_range_list *svms;
+       struct svm_range *prange;
+       uint32_t count = 0;
+
+       *svm_priv_data_size = 0;
+
+       svms = &p->svms;
+       if (!svms)
+               return -EINVAL;
+
+       mutex_lock(&svms->lock);
+       list_for_each_entry(prange, &svms->list, list) {
+               pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
+                        prange, prange->start, prange->npages,
+                        prange->start + prange->npages - 1);
+               count++;
+       }
+       mutex_unlock(&svms->lock);
+
+       *num_svm_ranges = count;
+       /* Only the accessibility attributes need to be queried for each GPU
+        * individually; the remaining ones span the entire process regardless
+        * of the GPU nodes. Of the remaining attributes,
+        * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
+        *
+        * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
+        * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
+        * KFD_IOCTL_SVM_ATTR_SET_FLAGS
+        * KFD_IOCTL_SVM_ATTR_GRANULARITY
+        *
+        * ** ACCESSIBILITY ATTRIBUTES **
+        * (Considered as one, type is altered during query, value is gpuid)
+        * KFD_IOCTL_SVM_ATTR_ACCESS
+        * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
+        * KFD_IOCTL_SVM_ATTR_NO_ACCESS
+        */
+       if (*num_svm_ranges > 0) {
+               common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
+                       nattr_common;
+               accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
+                       nattr_accessibility * num_devices;
+
+               total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
+                       common_attr_size + accessibility_size;
+
+               *svm_priv_data_size = *num_svm_ranges * total_size;
+       }
+
+       pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
+                *svm_priv_data_size);
+       return 0;
+}
+
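A quick worked example of the size accounting above, with struct sizes assumed for illustration only (struct kfd_ioctl_svm_attribute is two __u32s, i.e. 8 bytes; assume the priv-data header is 24 bytes on this ABI):

        /*
         * 2 GPUs, 3 ranges:
         *   common_attr_size    = 8 * 4        = 32
         *   accessibility_size  = 8 * 1 * 2    = 16
         *   total_size          = 24 + 32 + 16 = 72 per range
         *   *svm_priv_data_size = 3 * 72       = 216 bytes
         */
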
+int kfd_criu_checkpoint_svm(struct kfd_process *p,
+                           uint8_t __user *user_priv_data,
+                           uint64_t *priv_data_offset)
+{
+       struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
+       struct kfd_ioctl_svm_attribute *query_attr = NULL;
+       uint64_t svm_priv_data_size, query_attr_size = 0;
+       int index, nattr_common = 4, ret = 0;
+       struct svm_range_list *svms;
+       int num_devices = p->n_pdds;
+       struct svm_range *prange;
+       struct mm_struct *mm;
+
+       svms = &p->svms;
+       if (!svms)
+               return -EINVAL;
+
+       mm = get_task_mm(p->lead_thread);
+       if (!mm) {
+               pr_err("failed to get mm for the target process\n");
+               return -ESRCH;
+       }
+
+       query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
+                               (nattr_common + num_devices);
+
+       query_attr = kzalloc(query_attr_size, GFP_KERNEL);
+       if (!query_attr) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
+       query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
+       query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
+       query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
+
+       for (index = 0; index < num_devices; index++) {
+               struct kfd_process_device *pdd = p->pdds[index];
+
+               query_attr[index + nattr_common].type =
+                       KFD_IOCTL_SVM_ATTR_ACCESS;
+               query_attr[index + nattr_common].value = pdd->user_gpu_id;
+       }
+
+       svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
+
+       svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
+       if (!svm_priv) {
+               ret = -ENOMEM;
+               goto exit_query;
+       }
+
+       index = 0;
+       list_for_each_entry(prange, &svms->list, list) {
+               svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
+               svm_priv->start_addr = prange->start;
+               svm_priv->size = prange->npages;
+               memcpy(&svm_priv->attrs, query_attr, query_attr_size);
+               pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
+                        prange, prange->start, prange->npages,
+                        prange->start + prange->npages - 1,
+                        prange->npages * PAGE_SIZE);
+
+               ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
+                                        svm_priv->size,
+                                        (nattr_common + num_devices),
+                                        svm_priv->attrs);
+               if (ret) {
+                       pr_err("CRIU: failed to obtain range attributes\n");
+                       goto exit_priv;
+               }
+
+               if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
+                                svm_priv_data_size)) {
+                       pr_err("Failed to copy svm priv to user\n");
+                       ret = -EFAULT;
+                       goto exit_priv;
+               }
+
+               *priv_data_offset += svm_priv_data_size;
+       }
+
+exit_priv:
+       kfree(svm_priv);
+exit_query:
+       kfree(query_attr);
+exit:
+       mmput(mm);
+       return ret;
+}
+
 int
 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
          uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
 {
+       struct mm_struct *mm = current->mm;
        int r;
 
        start >>= PAGE_SHIFT;
@@ -3482,10 +3805,10 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
 
        switch (op) {
        case KFD_IOCTL_SVM_OP_SET_ATTR:
-               r = svm_range_set_attr(p, start, size, nattrs, attrs);
+               r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
                break;
        case KFD_IOCTL_SVM_OP_GET_ATTR:
-               r = svm_range_get_attr(p, start, size, nattrs, attrs);
+               r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
                break;
        default:
                r = EINVAL;
index 949b477e2f4c971c9c5d9aa004312848473f20e4..66c77f00ac3e4e8c554768b5df8b685e9e405d62 100644 (file)
@@ -183,6 +183,16 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
 void svm_range_free_dma_mappings(struct svm_range *prange);
 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
                        void *owner);
+int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
+                      uint64_t *svm_priv_data_size);
+int kfd_criu_checkpoint_svm(struct kfd_process *p,
+                           uint8_t __user *user_priv_data,
+                           uint64_t *priv_offset);
+int kfd_criu_restore_svm(struct kfd_process *p,
+                        uint8_t __user *user_priv_ptr,
+                        uint64_t *priv_data_offset,
+                        uint64_t max_priv_data_size);
+int kfd_criu_resume_svm(struct kfd_process *p);
 struct kfd_process_device *
 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
 void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
@@ -220,6 +230,35 @@ static inline int svm_range_schedule_evict_svm_bo(
        return -EINVAL;
 }
 
+static inline int svm_range_get_info(struct kfd_process *p,
+                                    uint32_t *num_svm_ranges,
+                                    uint64_t *svm_priv_data_size)
+{
+       *num_svm_ranges = 0;
+       *svm_priv_data_size = 0;
+       return 0;
+}
+
+static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,
+                                         uint8_t __user *user_priv_data,
+                                         uint64_t *priv_offset)
+{
+       return 0;
+}
+
+static inline int kfd_criu_restore_svm(struct kfd_process *p,
+                                      uint8_t __user *user_priv_ptr,
+                                      uint64_t *priv_data_offset,
+                                      uint64_t max_priv_data_size)
+{
+       return -EINVAL;
+}
+
+static inline int kfd_criu_resume_svm(struct kfd_process *p)
+{
+       return 0;
+}
+
 #define KFD_IS_SVM_API_SUPPORTED(dev) false
 
 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
index 526076e4bde35e9de011f9c67c7aeae877e34a18..be4852757818d45f193b6ab4228d9e86defb6659 100644 (file)
@@ -1027,7 +1027,6 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        const unsigned char *fw_inst_const, *fw_bss_data;
        uint32_t i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
-       struct dc *dc = adev->dm.dc;
 
        if (!dmub_srv)
                /* DMUB isn't supported on the ASIC. */
@@ -1119,14 +1118,12 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];
 
-       switch (adev->asic_type) {
-       case CHIP_YELLOW_CARP:
-               if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
-                       hw_params.dpia_supported = true;
+       switch (adev->ip_versions[DCE_HWIP][0]) {
+       case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */
+               hw_params.dpia_supported = true;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-                       hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
+               hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
 #endif
-               }
                break;
        default:
                break;
@@ -1496,10 +1493,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
 #endif
 
-       init_data.flags.power_down_display_on_boot = true;
+       init_data.flags.seamless_boot_edp_requested = false;
 
        if (check_seamless_boot_capability(adev)) {
-               init_data.flags.power_down_display_on_boot = false;
+               init_data.flags.seamless_boot_edp_requested = true;
                init_data.flags.allow_seamless_boot_optimization = true;
                DRM_INFO("Seamless boot condition check passed\n");
        }
@@ -2179,12 +2176,8 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 
 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
        int ret = 0;
 
-       if (!is_support_sw_smu(adev))
-               return 0;
-
        /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
         * on window driver dc implementation.
         * For Navi1x, clock settings of dcn watermarks are fixed. the settings
@@ -2223,7 +2216,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
                return 0;
        }
 
-       ret = smu_write_watermarks_table(smu);
+       ret = amdgpu_dpm_write_watermarks_table(adev);
        if (ret) {
                DRM_ERROR("Failed to update WMTABLE!\n");
                return ret;
@@ -3653,7 +3646,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
        /* Use GRPH_PFLIP interrupt */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
-                       i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+                       i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
@@ -6435,8 +6428,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing = amdgpu_freesync_vid_mode &&
-                                is_freesync_video_mode(&mode, aconnector);
+               recalculate_timing = is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        saved_mode = mode;
@@ -6499,7 +6491,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
                                stream->use_vsc_sdp_for_colorimetry = true;
                }
-               mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+               mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
                aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
        }
@@ -8143,6 +8135,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
                mode = amdgpu_dm_create_common_mode(encoder,
                                common_modes[i].name, common_modes[i].w,
                                common_modes[i].h);
+               if (!mode)
+                       continue;
+
                drm_mode_probed_add(connector, mode);
                amdgpu_dm_connector->num_modes++;
        }
@@ -8304,7 +8299,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
 
-       if (!(amdgpu_freesync_vid_mode && edid))
+       if (!edid)
                return;
 
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -8371,7 +8366,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                break;
        case DRM_MODE_CONNECTOR_DisplayPort:
                aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-               link->link_enc = dp_get_link_enc(link);
+               link->link_enc = link_enc_cfg_get_link_enc(link);
                ASSERT(link->link_enc);
                if (link->link_enc)
                        aconnector->base.ycbcr_420_allowed =
@@ -10271,8 +10266,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
-               if (amdgpu_freesync_vid_mode &&
-                   dm_new_crtc_state->stream &&
+               if (dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;
 
@@ -10307,7 +10301,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
 
-               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+               if (dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
@@ -10319,7 +10313,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                        set_freesync_fixed_config(dm_new_crtc_state);
 
                        goto skip_modeset;
-               } else if (amdgpu_freesync_vid_mode && aconnector &&
+               } else if (aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;
index bee806ae3e525a05cbda19e1249e45e7546b8671..28a2b9d476b47ede0abc0d5ff21c9fa1de097a57 100644 (file)
@@ -604,6 +604,7 @@ struct amdgpu_dm_connector {
 #endif
        bool force_yuv420_output;
        struct dsc_preferred_settings dsc_settings;
+       union dp_downstream_port_present mst_downstream_port_present;
        /* Cached display modes */
        struct drm_display_mode freesync_vid_base;
 
index 26719efa5396de309d242bb96029a5b1db2b2652..bdea177fae559995104c2d3e05cd98326cd543c2 100644 (file)
@@ -227,8 +227,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -389,8 +391,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user((*(rd_buf + result)), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -1359,8 +1363,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -1376,8 +1382,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -1546,8 +1554,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -1563,8 +1573,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -1731,8 +1743,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -1748,8 +1762,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -1912,8 +1928,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -1929,8 +1947,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -2088,8 +2108,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -2105,8 +2127,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -2145,8 +2169,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -2162,8 +2188,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -2217,8 +2245,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -2234,8 +2264,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -2289,8 +2321,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
                                break;
        }
 
-       if (!pipe_ctx)
+       if (!pipe_ctx) {
+               kfree(rd_buf);
                return -ENXIO;
+       }
 
        dsc = pipe_ctx->stream_res.dsc;
        if (dsc)
@@ -2306,8 +2340,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
                        break;
 
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
 
                buf += 1;
                size -= 1;
@@ -2851,7 +2887,7 @@ static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
                kfree(wr_buf);
                DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
                prefer_link_settings.use_link_rate_set = false;
-               dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true);
+               dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
                return size;
        }
 
@@ -3395,6 +3431,30 @@ static int dp_force_sst_get(void *data, u64 *val)
 }
 DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get,
                         dp_force_sst_set, "%llu\n");
+
+/*
+ * Force DP2 sequence without VESA certified cable.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_ignore_cable_id
+ */
+static int dp_ignore_cable_id_set(void *data, u64 val)
+{
+       struct amdgpu_device *adev = data;
+
+       adev->dm.dc->debug.ignore_cable_id = val;
+
+       return 0;
+}
+
+static int dp_ignore_cable_id_get(void *data, u64 *val)
+{
+       struct amdgpu_device *adev = data;
+
+       *val = adev->dm.dc->debug.ignore_cable_id;
+
+       return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get,
+                        dp_ignore_cable_id_set, "%llu\n");
 #endif
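dp_ignore_cable_id follows the standard debugfs u64-attribute idiom: a get/set pair wrapped by DEFINE_DEBUGFS_ATTRIBUTE() and registered in dtn_debugfs_init() below. A minimal standalone sketch of the same idiom, with invented demo_* names:

static u64 demo_val;

static int demo_set(void *data, u64 val)
{
        demo_val = val;
        return 0;
}

static int demo_get(void *data, u64 *val)
{
        *val = demo_val;
        return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");
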
 
 /*
@@ -3459,8 +3519,10 @@ static ssize_t dcc_en_bits_read(
        dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
 
        rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
-       if (!rd_buf)
+       if (!rd_buf) {
+               kfree(dcc_en_bits);
                return -ENOMEM;
+       }
 
        for (i = 0; i < num_pipes; i++)
                offset += snprintf(rd_buf + offset, rd_buf_size - offset,
@@ -3473,8 +3535,10 @@ static ssize_t dcc_en_bits_read(
                if (*pos >= rd_buf_size)
                        break;
                r = put_user(*(rd_buf + result), buf);
-               if (r)
+               if (r) {
+                       kfree(rd_buf);
                        return r; /* r = -EFAULT */
+               }
                buf += 1;
                size -= 1;
                *pos += 1;
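The same fix repeats across all of the debugfs readers in this file: once rd_buf has been allocated, every early return must free it. In isolation, the corrected loop shape is roughly this (a sketch, not any one of the functions above):

static ssize_t demo_read(struct file *f, char __user *buf,
                         size_t size, loff_t *pos)
{
        char *rd_buf;
        ssize_t result = 0;
        int r;

        rd_buf = kcalloc(64, sizeof(char), GFP_KERNEL);
        if (!rd_buf)
                return -ENOMEM;

        /* ... fill rd_buf ... */

        while (size) {
                if (*pos >= 64)
                        break;

                r = put_user(rd_buf[*pos], buf);
                if (r) {
                        kfree(rd_buf);  /* the fix: no leak on fault */
                        return r;       /* r == -EFAULT */
                }

                buf++;
                size--;
                (*pos)++;
                result++;
        }

        kfree(rd_buf);
        return result;
}
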
@@ -3509,6 +3573,8 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
                                &dp_set_mst_en_for_sst_ops);
+       debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev,
+                               &dp_ignore_cable_id_ops);
 #endif
 
        debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
index 29f07c26d080f94c58cc1f37fb5cc4105cea5bc4..db4ab01267e41d970dc258c653b6d40594fd5dd0 100644 (file)
@@ -39,6 +39,7 @@
 #include "amdgpu_dm_mst_types.h"
 
 #include "dm_helpers.h"
+#include "ddc_service_types.h"
 
 struct monitor_patch_info {
        unsigned int manufacturer_id;
@@ -445,40 +446,24 @@ bool dm_helpers_dp_mst_start_top_mgr(
        return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
 }
 
-void dm_helpers_dp_mst_stop_top_mgr(
+bool dm_helpers_dp_mst_stop_top_mgr(
                struct dc_context *ctx,
                struct dc_link *link)
 {
        struct amdgpu_dm_connector *aconnector = link->priv;
-       uint8_t i;
 
        if (!aconnector) {
                DRM_ERROR("Failed to find connector for link!");
-               return;
+               return false;
        }
 
        DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
                        aconnector, aconnector->base.base.id);
 
-       if (aconnector->mst_mgr.mst_state == true) {
+       if (aconnector->mst_mgr.mst_state == true)
                drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
 
-               for (i = 0; i < MAX_SINKS_PER_LINK; i++) {
-                       if (link->remote_sinks[i] == NULL)
-                               continue;
-
-                       if (link->remote_sinks[i]->sink_signal ==
-                           SIGNAL_TYPE_DISPLAY_PORT_MST) {
-                               dc_link_remove_remote_sink(link, link->remote_sinks[i]);
-
-                               if (aconnector->dc_sink) {
-                                       dc_sink_release(aconnector->dc_sink);
-                                       aconnector->dc_sink = NULL;
-                                       aconnector->dc_link->cur_link_settings.lane_count = 0;
-                               }
-                       }
-               }
-       }
+       return false;
 }
 
 bool dm_helpers_dp_read_dpcd(
@@ -552,6 +537,177 @@ bool dm_helpers_submit_i2c(
 
        return result;
 }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
+               bool is_write_cmd,
+               unsigned char cmd,
+               unsigned int length,
+               unsigned int offset,
+               unsigned char *data)
+{
+       bool success = false;
+       unsigned char rc_data[16] = {0};
+       unsigned char rc_offset[4] = {0};
+       unsigned char rc_length[2] = {0};
+       unsigned char rc_cmd = 0;
+       unsigned char rc_result = 0xFF;
+       unsigned char i = 0;
+       ssize_t ret = 0;
+
+       if (is_write_cmd) {
+               // write rc data
+               memmove(rc_data, data, length);
+               ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data));
+       }
+
+       // write rc offset
+       rc_offset[0] = (unsigned char) offset & 0xFF;
+       rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF;
+       rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF;
+       rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF;
+       ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset));
+
+       // write rc length
+       rc_length[0] = (unsigned char) length & 0xFF;
+       rc_length[1] = (unsigned char) (length >> 8) & 0xFF;
+       ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length));
+
+       // write rc cmd
+       rc_cmd = cmd | 0x80;
+       ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
+
+       if (ret < 0) {
+               DRM_ERROR("execute_synaptics_rc_command: write cmd failed, err = %zd\n", ret);
+               return false;
+       }
+
+       // poll until active is 0
+       for (i = 0; i < 10; i++) {
+               drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
+               if (rc_cmd == cmd)
+                       // active is 0
+                       break;
+               msleep(10);
+       }
+
+       // read rc result
+       drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result));
+       success = (rc_result == 0);
+
+       if (success && !is_write_cmd) {
+               // read rc data
+               drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
+       }
+
+       DC_LOG_DC("     execute_synaptics_rc_command - success = %d\n", success);
+
+       return success;
+}
+
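Taken on its own, a single remote-command read through the helper above looks like this (illustrative only; the command byte 0x31 and offset 0x220998 are the ones used by the workaround below):

static void demo_rc_read(struct drm_dp_aux *aux)
{
        unsigned char val[4] = {0};

        /* cmd 0x31 = read, 4 bytes from Synaptics internal offset 0x220998 */
        if (execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, val))
                DC_LOG_DC("0x220998 = 0x%02x\n", val[0]);
}
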
+static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
+{
+       unsigned char data[16] = {0};
+
+       DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
+
+       // Step 2
+       data[0] = 'P';
+       data[1] = 'R';
+       data[2] = 'I';
+       data[3] = 'U';
+       data[4] = 'S';
+
+       if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data))
+               return;
+
+       // Step 3 and 4
+       if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
+               return;
+
+       data[0] &= (~(1 << 1)); // set bit 1 to 0
+       if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
+               return;
+
+       if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
+               return;
+
+       data[0] &= (~(1 << 1)); // set bit 1 to 0
+       if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
+               return;
+
+       if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
+               return;
+
+       data[0] &= (~(1 << 1)); // set bit 1 to 0
+       if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
+               return;
+
+       // Step 3 and 5
+       if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data))
+               return;
+
+       data[0] |= (1 << 1); // set bit 1 to 1
+       if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data))
+               return;
+
+       if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data))
+               return;
+
+       data[0] |= (1 << 1); // set bit 1 to 1
+       if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data))
+               return;
+
+       if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
+               return;
+
+       data[0] |= (1 << 1); // set bit 1 to 1
+       if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data))
+               return;
+
+       // Step 6
+       if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
+               return;
+
+       DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
+}
+
+static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
+               struct drm_dp_aux *aux,
+               const struct dc_stream_state *stream,
+               bool enable)
+{
+       uint8_t ret = 0;
+
+       DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n");
+
+       if (enable) {
+               /* If DSC was enabled on the previous boot and the system
+                * reboots with the hub attached, the Synaptics hub may get
+                * stuck during the reboot sequence. Apply a workaround to
+                * reset the Synaptics SDP fifo before enabling the first
+                * stream.
+                */
+               if (!stream->link->link_status.link_active &&
+                       memcmp(stream->link->dpcd_caps.branch_dev_name,
+                               (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0)
+                       apply_synaptics_fifo_reset_wa(aux);
+
+               ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
+               DRM_INFO("Send DSC enable to synaptics\n");
+
+       } else {
+               /* The Synaptics hub does not support virtual DPCD, and the
+                * external monitor shows garbage if DSC is disabled while the
+                * link is still active. Only disable DSC once the entire link
+                * status has gone inactive.
+                */
+               if (!stream->link->link_status.link_active) {
+                       ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
+                       DRM_INFO("Send DSC disable to synaptics\n");
+               }
+       }
+
+       return ret;
+}
+#endif
+
 bool dm_helpers_dp_write_dsc_enable(
                struct dc_context *ctx,
                const struct dc_stream_state *stream,
@@ -570,7 +726,16 @@ bool dm_helpers_dp_write_dsc_enable(
                if (!aconnector->dsc_aux)
                        return false;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               // apply w/a to synaptics
+               if (needs_dsc_aux_workaround(aconnector->dc_link) &&
+                   (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3)
+                       return write_dsc_enable_synaptics_non_virtual_dpcd_mst(
+                               aconnector->dsc_aux, stream, enable_dsc);
+#endif
+
                ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
+               DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable");
        }
 
        if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
@@ -648,14 +813,6 @@ enum dc_edid_status dm_helpers_read_local_edid(
                /* We don't need the original edid anymore */
                kfree(edid);
 
-               /* connector->display_info is parsed from EDID and saved
-                * into drm_connector->display_info
-                *
-                * drm_connector->display_info will be used by amdgpu_dm funcs,
-                * like fill_stream_properties_from_drm_display_mode
-                */
-               amdgpu_dm_update_connector_after_detect(aconnector);
-
                edid_status = dm_helpers_parse_edid_caps(
                                                link,
                                                &sink->dc_edid,
@@ -797,16 +954,12 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
                                         sizeof(new_downspread));
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
 {
-       // FPGA programming for this clock in diags framework that
-       // needs to go through dm layer, therefore leave dummy interace here
+       // TODO
 }
 
-
 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
 {
        /* TODO: add periodic detection implementation */
 }
-#endif
index 35c944a8e74d5d206a9ec5f2c2a620bce95f2423..d24be9fb5845427f6ceaf30dd6ec7fe5d93b176a 100644 (file)
@@ -159,7 +159,7 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-static bool needs_dsc_aux_workaround(struct dc_link *link)
+bool needs_dsc_aux_workaround(struct dc_link *link)
 {
        if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
            (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
@@ -209,6 +209,25 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
 
        return true;
 }
+
+bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
+{
+       union dp_downstream_port_present ds_port_present;
+
+       if (!aconnector->dsc_aux)
+               return false;
+
+       if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
+               DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
+               return false;
+       }
+
+       aconnector->mst_downstream_port_present = ds_port_present;
+       DRM_INFO("Downstream port present %d, type %d\n",
+                       ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);
+
+       return true;
+}
 #endif
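For reference, the byte read here is DPCD register 0x05 (DP_DOWNSTREAMPORT_PRESENT), which the DisplayPort spec lays out as:

        /*
         * bit 0     DFP_PRESENT
         * bits 2:1  DFP_TYPE
         * bit 3     FORMAT_CONVERSION
         * bit 4     DETAILED_CAP_INFO_AVAILABLE
         */

The (byte & 0x7) mask in dm_helpers_dp_write_dsc_enable() therefore keys off presence plus port type when deciding whether the Synaptics workaround applies.
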
 
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -289,6 +308,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                        if (!validate_dsc_caps_on_connector(aconnector))
                                memset(&aconnector->dc_sink->dsc_caps,
                                       0, sizeof(aconnector->dc_sink->dsc_caps));
+
+                       if (!retrieve_downstream_port_device(aconnector))
+                               memset(&aconnector->mst_downstream_port_present,
+                                       0, sizeof(aconnector->mst_downstream_port_present));
 #endif
                }
        }
index 900d3f7a84989e3b22f855a36c82191482bc7963..5da28ca033726022531e0747f840c4b1bbb27ec0 100644 (file)
 #ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
 #define __DAL_AMDGPU_DM_MST_TYPES_H__
 
+#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
+
+#define SYNAPTICS_RC_COMMAND       0x4B2
+#define SYNAPTICS_RC_RESULT        0x4B3
+#define SYNAPTICS_RC_LENGTH        0x4B8
+#define SYNAPTICS_RC_OFFSET        0x4BC
+#define SYNAPTICS_RC_DATA          0x4C0
+
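These DPCD addresses form the vendor mailbox driven by execute_synaptics_rc_command(); the field widths below follow from the buffer sizes used in that helper:

        /*
         * 0x4B2  SYNAPTICS_RC_COMMAND  1 byte   (bit 7 set = command active)
         * 0x4B3  SYNAPTICS_RC_RESULT   1 byte   (0 = success)
         * 0x4B8  SYNAPTICS_RC_LENGTH   2 bytes, LSB first
         * 0x4BC  SYNAPTICS_RC_OFFSET   4 bytes, LSB first
         * 0x4C0  SYNAPTICS_RC_DATA     16 bytes
         */
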
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
 
@@ -50,6 +58,8 @@ struct dsc_mst_fairness_vars {
 bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                                       struct dc_state *dc_state,
                                       struct dsc_mst_fairness_vars *vars);
+
+bool needs_dsc_aux_workaround(struct dc_link *link);
 #endif
 
 #endif
index eba2701216984a2d88547579a95e231fc92ae9a3..75284e2cec747b3aa9b1de6504db2524c75988ed 100644 (file)
@@ -99,12 +99,9 @@ bool dm_pp_apply_display_requirements(
                        adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
                }
 
-               if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
-                       adev->powerplay.pp_funcs->display_configuration_change(
-                               adev->powerplay.pp_handle,
-                               &adev->pm.pm_display_cfg);
+               amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
 
-               amdgpu_pm_compute_clocks(adev);
+               amdgpu_dpm_compute_clocks(adev);
        }
 
        return true;
@@ -298,31 +295,25 @@ bool dm_pp_get_clock_levels_by_type(
                struct dm_pp_clock_levels *dc_clks)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
        struct amd_pp_clocks pp_clks = { 0 };
        struct amd_pp_simple_clock_info validation_clks = { 0 };
        uint32_t i;
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
-               if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
-                       dc_to_pp_clock_type(clk_type), &pp_clks)) {
-                       /* Error in pplib. Provide default values. */
-                       get_default_clock_levels(clk_type, dc_clks);
-                       return true;
-               }
+       if (amdgpu_dpm_get_clock_by_type(adev,
+               dc_to_pp_clock_type(clk_type), &pp_clks)) {
+               /* Error in pplib. Provide default values. */
+               get_default_clock_levels(clk_type, dc_clks);
+               return true;
        }
 
        pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
-               if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
-                                               pp_handle, &validation_clks)) {
-                       /* Error in pplib. Provide default values. */
-                       DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
-                       validation_clks.engine_max_clock = 72000;
-                       validation_clks.memory_max_clock = 80000;
-                       validation_clks.level = 0;
-               }
+       if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) {
+               /* Error in pplib. Provide default values. */
+               DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+               validation_clks.engine_max_clock = 72000;
+               validation_clks.memory_max_clock = 80000;
+               validation_clks.level = 0;
        }
 
        DRM_INFO("DM_PPLIB: Validation clocks:\n");
@@ -370,18 +361,14 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
        struct dm_pp_clock_levels_with_latency *clk_level_info)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
        struct pp_clock_levels_with_latency pp_clks = { 0 };
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret;
 
-       if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
-               ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
-                                               dc_to_pp_clock_type(clk_type),
-                                               &pp_clks);
-               if (ret)
-                       return false;
-       }
+       ret = amdgpu_dpm_get_clock_by_type_with_latency(adev,
+                                       dc_to_pp_clock_type(clk_type),
+                                       &pp_clks);
+       if (ret)
+               return false;
 
        pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 
@@ -394,18 +381,14 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
        struct dm_pp_clock_levels_with_voltage *clk_level_info)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
        struct pp_clock_levels_with_voltage pp_clk_info = {0};
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret;
 
-       if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
-               ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
-                                               dc_to_pp_clock_type(clk_type),
-                                               &pp_clk_info);
-               if (ret)
-                       return false;
-       }
+       ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev,
+                                       dc_to_pp_clock_type(clk_type),
+                                       &pp_clk_info);
+       if (ret)
+               return false;
 
        pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
 
@@ -417,19 +400,16 @@ bool dm_pp_notify_wm_clock_changes(
        struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
        /*
         * Limit this watermark setting for Polaris for now
         * TODO: expand this to other ASICs
         */
-       if ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_VEGAM)
-            && pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) {
-               if (!pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
+       if ((adev->asic_type >= CHIP_POLARIS10) &&
+            (adev->asic_type <= CHIP_VEGAM) &&
+            !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
                                                (void *)wm_with_clock_ranges))
                        return true;
-       }
 
        return false;
 }
@@ -456,12 +436,10 @@ bool dm_pp_apply_clock_for_voltage_request(
        if (!pp_clock_request.clock_type)
                return false;
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
-               ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
-                       adev->powerplay.pp_handle,
-                       &pp_clock_request);
-       if (ret)
+       ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request);
+       if (ret && (ret != -EOPNOTSUPP))
                return false;
+
        return true;
 }
 
@@ -471,15 +449,8 @@ bool dm_pp_get_static_clocks(
 {
        struct amdgpu_device *adev = ctx->driver_context;
        struct amd_pp_clock_info pp_clk_info = {0};
-       int ret = 0;
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
-               ret = adev->powerplay.pp_funcs->get_current_clocks(
-                       adev->powerplay.pp_handle,
-                       &pp_clk_info);
-       else
-               return false;
-       if (ret)
+       if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info))
                return false;
 
        static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
@@ -494,8 +465,6 @@ static void pp_rv_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
        struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
        struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
@@ -536,72 +505,48 @@ static void pp_rv_set_wm_ranges(struct pp_smu *pp,
                                ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
        }
 
-       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
-               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
-                                                          &wm_with_clock_ranges);
+       amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
+                                                   &wm_with_clock_ranges);
 }
 
 static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
-               pp_funcs->notify_smu_enable_pwe(pp_handle);
+       amdgpu_dpm_notify_smu_enable_pwe(adev);
 }
 
 static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!pp_funcs || !pp_funcs->set_active_display_count)
-               return;
-
-       pp_funcs->set_active_display_count(pp_handle, count);
+       amdgpu_dpm_set_active_display_count(adev, count);
 }
 
 static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-       if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
-               return;
 
-       pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
+       amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock);
 }
 
 static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
-               return;
-
-       pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
+       amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock);
 }
 
 static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-       if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
-               return;
 
-       pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
+       amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz);
 }
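After this refactor every pp_rv_* hook above collapses to the same two-step shape: recover the amdgpu_device from the pp_smu context, then forward to the matching amdgpu_dpm_* wrapper, which now owns the NULL-callback check each hook used to open-code. A sketch of that shape, with a hypothetical hook name:

static void pp_rv_example_hook(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	/* The amdgpu_dpm_* wrapper validates pp_funcs internally and
	 * returns -EOPNOTSUPP when the backend lacks the callback.
	 */
	amdgpu_dpm_set_active_display_count(adev, count);
}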
 
 static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
@@ -609,11 +554,8 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
-               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
+       amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);
 
        return PP_SMU_RESULT_OK;
 }
@@ -622,14 +564,13 @@ static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
 
-       if (!pp_funcs || !pp_funcs->set_active_display_count)
+       ret = amdgpu_dpm_set_active_display_count(adev, count);
+       if (ret == -EOPNOTSUPP)
                return PP_SMU_RESULT_UNSUPPORTED;
-
-       /* 0: successful or smu.ppt_funcs->set_display_count = NULL;  1: fail */
-       if (pp_funcs->set_active_display_count(pp_handle, count))
+       else if (ret)
+               /* any other error: the backend call itself failed */
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -640,14 +581,13 @@ pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
-       if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       int ret = 0;
 
        /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */
-       if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz))
+       ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz);
+       if (ret == -EOPNOTSUPP)
+               return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -658,12 +598,8 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
-
-       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       int ret = 0;
 
        clock_req.clock_type = amd_pp_dcef_clock;
        clock_req.clock_freq_in_khz = mhz * 1000;
@@ -671,7 +607,10 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
        /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
         * 1: fail
         */
-       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
+       ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
+       if (ret == -EOPNOTSUPP)
+               return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -682,12 +621,8 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
-
-       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       int ret = 0;
 
        clock_req.clock_type = amd_pp_mem_clock;
        clock_req.clock_freq_in_khz = mhz * 1000;
@@ -695,7 +630,10 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
        /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
         * 1: fail
         */
-       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
+       ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
+       if (ret == -EOPNOTSUPP)
+               return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -706,14 +644,10 @@ static enum pp_smu_status pp_nv_set_pstate_handshake_support(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) {
-               if (pp_funcs->display_disable_memory_clock_switch(pp_handle,
-                                                                 !pstate_handshake_supported))
-                       return PP_SMU_RESULT_FAIL;
-       }
+       if (amdgpu_dpm_display_disable_memory_clock_switch(adev,
+                                                         !pstate_handshake_supported))
+               return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
 }
@@ -723,12 +657,8 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
-
-       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       int ret = 0;
 
        switch (clock_id) {
        case PP_SMU_NV_DISPCLK:
@@ -748,7 +678,10 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
        /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
         * 1: fail
         */
-       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
+       ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
+       if (ret == -EOPNOTSUPP)
+               return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -759,16 +692,16 @@ static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
 
-       if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc)
+       ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev,
+                                                         max_clocks);
+       if (ret == -EOPNOTSUPP)
                return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
+               return PP_SMU_RESULT_FAIL;
 
-       if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks))
-               return PP_SMU_RESULT_OK;
-
-       return PP_SMU_RESULT_FAIL;
+       return PP_SMU_RESULT_OK;
 }
 
 static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
@@ -776,18 +709,17 @@ static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
 
-       if (!pp_funcs || !pp_funcs->get_uclk_dpm_states)
+       ret = amdgpu_dpm_get_uclk_dpm_states(adev,
+                                            clock_values_in_khz,
+                                            num_states);
+       if (ret == -EOPNOTSUPP)
                return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
+               return PP_SMU_RESULT_FAIL;
 
-       if (!pp_funcs->get_uclk_dpm_states(pp_handle,
-                                          clock_values_in_khz,
-                                          num_states))
-               return PP_SMU_RESULT_OK;
-
-       return PP_SMU_RESULT_FAIL;
+       return PP_SMU_RESULT_OK;
 }
 
 static enum pp_smu_status pp_rn_get_dpm_clock_table(
@@ -795,16 +727,15 @@ static enum pp_smu_status pp_rn_get_dpm_clock_table(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
 
-       if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
+       ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table);
+       if (ret == -EOPNOTSUPP)
                return PP_SMU_RESULT_UNSUPPORTED;
+       else if (ret)
+               return PP_SMU_RESULT_FAIL;
 
-       if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table))
-               return PP_SMU_RESULT_OK;
-
-       return PP_SMU_RESULT_FAIL;
+       return PP_SMU_RESULT_OK;
 }
 
 static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
@@ -812,11 +743,8 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       void *pp_handle = adev->powerplay.pp_handle;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
-               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
+       amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);
 
        return PP_SMU_RESULT_OK;
 }
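The pp_nv_* and pp_rn_* hooks above all repeat the same errno-to-status translation after calling an amdgpu_dpm_* wrapper. As a sketch, the shared tail could be factored like this (hypothetical helper, not part of the patch):

static enum pp_smu_status pp_smu_status_from_errno(int ret)
{
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED; /* backend callback missing */
	if (ret)
		return PP_SMU_RESULT_FAIL;        /* backend reported an error */
	return PP_SMU_RESULT_OK;
}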
index c510638b4f997a3fbf1ec6025f5e0f434fb07588..a009fc654ac9524e9549da35e572625d8d1133fb 100644 (file)
@@ -149,10 +149,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
 
        link = stream->link;
 
-       psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
-
-       if (psr_config.psr_version > 0) {
-               psr_config.psr_exit_link_training_required = 0x1;
+       if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
+               psr_config.psr_version = link->psr_settings.psr_version;
                psr_config.psr_frame_capture_indication_req = 0;
                psr_config.psr_rfb_setup_time = 0x37;
                psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
index b1f0d6260226e504cb51c1bf063cab8436f6fbd5..0aaf394b73ffb6540b2588ec003b11cbda059547 100644 (file)
 # Makefile for Display Core (dc) component.
 #
 
-DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual
+DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 DC_LIBS += dcn20
 DC_LIBS += dsc
-DC_LIBS += dcn10 dml
+DC_LIBS += dcn10
 DC_LIBS += dcn21
 DC_LIBS += dcn201
 DC_LIBS += dcn30
@@ -58,7 +58,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
 include $(AMD_DC)
 
 DISPLAY_CORE = dc.o  dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
+dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
 dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
 
 ifdef CONFIG_DRM_AMD_DC_DCN
index 1e385d55e7fbba5f0fe80f471518faa2aafbaa74..23a3b640f0ee95ef26826eea792f12bde69dc08d 100644 (file)
@@ -1692,7 +1692,6 @@ static enum bp_result bios_parser_get_encoder_cap_info(
                        ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
        info->HDMI_6GB_EN = (record->encodercaps &
                        ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        info->IS_DP2_CAPABLE = (record->encodercaps &
                        ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
        info->DP_UHBR10_EN = (record->encodercaps &
@@ -1701,7 +1700,6 @@ static enum bp_result bios_parser_get_encoder_cap_info(
                        ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
        info->DP_UHBR20_EN = (record->encodercaps &
                        ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
-#endif
        info->DP_IS_USB_C = (record->encodercaps &
                        ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
 
index 9afa5eb2e6d3560eaff20969befc868e6b8b65d5..f52f7ff7ead4b60f68034c979c652120e9439192 100644 (file)
@@ -338,12 +338,10 @@ static enum bp_result transmitter_control_v1_7(
        const struct command_table_helper *cmd = bp->cmd_helper;
        struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0};
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0;
 
        if (dc_is_dp_signal(cntl->signal))
                hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0;
-#endif
 
        dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter);
        dig_v1_7.action = (uint8_t)cntl->action;
@@ -358,9 +356,7 @@ static enum bp_result transmitter_control_v1_7(
        dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
        dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
        dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        dig_v1_7.HPO_instance = hpo_instance;
-#endif
        dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10;
 
        if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
deleted file mode 100644 (file)
index f3c00f4..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-# Copyright 2019 Raptor Engineering, LLC
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for the 'calcs' sub-component of DAL.
-# It calculates Bandwidth and Watermarks values for HW programming
-#
-
-ifdef CONFIG_X86
-calcs_ccflags := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-calcs_ccflags := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-calcs_ccflags += -mpreferred-stack-boundary=4
-else
-calcs_ccflags += -msse2
-endif
-endif
-
-CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_rcflags)
-
-BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
-
-ifdef CONFIG_DRM_AMD_DC_DCN
-BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
-endif
-
-AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)
index f977f29907df5898bc88c712dff6f7c510fb5516..589131d415fdc6ff7e7079104ad7d8daf491ea70 100644 (file)
@@ -42,8 +42,8 @@
 
 #include "nbio/nbio_7_4_offset.h"
 
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 
 #include "mmhub/mmhub_2_0_0_offset.h"
 #include "mmhub/mmhub_2_0_0_sh_mask.h"
@@ -184,6 +184,7 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)
        dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK,
                        &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
                        &num_levels);
+       dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, 0);
 
        /* DTBCLK */
        dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK,
index 48005def11645cf606b198b3efeb48d730f88c54..bc4ddc36fe58b4bb797adfe5a459fb211e611ffd 100644 (file)
@@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 7.95,
-                       .sr_enter_plus_exit_time_us = 9,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 9.82,
-                       .sr_enter_plus_exit_time_us = 11.196,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 9.89,
-                       .sr_enter_plus_exit_time_us = 11.24,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
-                       .sr_exit_time_us = 9.748,
-                       .sr_enter_plus_exit_time_us = 11.102,
+                       .sr_exit_time_us = 13.5,
+                       .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
        }
index 4162ce40089b17d53666f9a110984c0dfba11b55..e17c9938cee58b78a4d6ac196cb1e06899af8e06 100644 (file)
@@ -139,9 +139,9 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
         * also if safe to lower is false, we just go in the higher state
         */
        if (safe_to_lower) {
-               if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+               if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
                                new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
-                       dcn31_smu_set_Z9_support(clk_mgr, true);
+                       dcn31_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
                        dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
                        clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
                }
@@ -167,7 +167,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
        } else {
                if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
                                new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
-                       dcn31_smu_set_Z9_support(clk_mgr, false);
+                       dcn31_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
                        dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
                        clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
                }
@@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = {
 
 };
 
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr5_wm_table = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 6.09,
-                       .sr_enter_plus_exit_time_us = 7.14,
+                       .sr_exit_time_us = 9,
+                       .sr_enter_plus_exit_time_us = 11,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 10.12,
-                       .sr_enter_plus_exit_time_us = 11.48,
+                       .sr_exit_time_us = 9,
+                       .sr_enter_plus_exit_time_us = 11,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 10.12,
-                       .sr_enter_plus_exit_time_us = 11.48,
+                       .sr_exit_time_us = 9,
+                       .sr_enter_plus_exit_time_us = 11,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
-                       .sr_exit_time_us = 10.12,
-                       .sr_enter_plus_exit_time_us = 11.48,
+                       .sr_exit_time_us = 9,
+                       .sr_enter_plus_exit_time_us = 11,
                        .valid = true,
                },
        }
@@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct(
                if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
                        dcn31_bw_params.wm_table = lpddr5_wm_table;
                } else {
-                       dcn31_bw_params.wm_table = ddr4_wm_table;
+                       dcn31_bw_params.wm_table = ddr5_wm_table;
                }
                /* Saved clocks configured at boot for debug purposes */
                 dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
index a1011f3273f384cf7f0980a434e36bbce45d117a..c5d7d075026f31d886bafe8733afbea062d7d257 100644 (file)
@@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
        result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
 
        if (result == VBIOSSMC_Result_Failed) {
-               ASSERT(0);
+               if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+                   param == TABLE_WATERMARKS)
+                       DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+               else
+                       ASSERT(0);
                REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
                return -1;
        }
@@ -306,23 +310,32 @@ void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
                        VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS);
 }
 
-void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support)
+void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
 {
-       //TODO: Work with smu team to define optimization options.
-       unsigned int msg_id;
+       unsigned int msg_id, param;
 
        if (!clk_mgr->smu_present)
                return;
 
-       if (support)
-               msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
+       if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface &&
+                       (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY))
+               support = DCN_ZSTATE_SUPPORT_DISALLOW;
+
+       if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY)
+               param = 1;
        else
+               param = 0;
+
+       if (support == DCN_ZSTATE_SUPPORT_DISALLOW)
                msg_id = VBIOSSMC_MSG_DisallowZstatesEntry;
+       else
+               msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
 
        dcn31_smu_send_msg_with_param(
                clk_mgr,
                msg_id,
-               0);
+               param);
 
 }
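For review purposes, the message/parameter selection in dcn31_smu_set_zstate_support() above reduces to a small table:

/*
 * support value                      msg_id                    param
 * DCN_ZSTATE_SUPPORT_DISALLOW        DisallowZstatesEntry      0
 * DCN_ZSTATE_SUPPORT_ALLOW           AllowZstatesEntry         0
 * DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY  AllowZstatesEntry         1
 *
 * with ALLOW_Z10_ONLY first demoted to DISALLOW when the
 * enable_z9_disable_interface debug knob is clear.
 */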
 
index cd0b7e1e685f8bfb8a9da8267548ad61fe695af5..dfa25a76a6d10aecbd3f2e690097af331dc00375 100644 (file)
@@ -265,7 +265,7 @@ void dcn31_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr
 void dcn31_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr);
 void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
 
-void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support);
+void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support);
 void dcn31_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
 
 #endif /* DAL_DC_31_SMU_H_ */
index 6f5528d340939c0984a52a48778e2cfb0da50785..467f606ba2c729d79adfc06d042cee18cd795c75 100644 (file)
@@ -1220,6 +1220,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
 
                dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
 
+               dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
                if (dc->res_pool->dmcu != NULL)
                        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
        }
@@ -1467,7 +1469,7 @@ static bool context_changed(
        return false;
 }
 
-bool dc_validate_seamless_boot_timing(const struct dc *dc,
+bool dc_validate_boot_timing(const struct dc *dc,
                                const struct dc_sink *sink,
                                struct dc_crtc_timing *crtc_timing)
 {
@@ -2377,10 +2379,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
                if (stream_update->dsc_config)
                        su_flags->bits.dsc_changed = 1;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (stream_update->mst_bw_update)
                        su_flags->bits.mst_bw = 1;
-#endif
 
                if (su_flags->raw != 0)
                        overall_type = UPDATE_TYPE_FULL;
@@ -2722,6 +2722,9 @@ static void commit_planes_do_stream_update(struct dc *dc,
                                        stream_update->vsp_infopacket) {
                                resource_build_info_frame(pipe_ctx);
                                dc->hwss.update_info_frame(pipe_ctx);
+
+                               if (dc_is_dp_signal(pipe_ctx->stream->signal))
+                                       dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
                        }
 
                        if (stream_update->hdr_static_metadata &&
@@ -2759,14 +2762,12 @@ static void commit_planes_do_stream_update(struct dc *dc,
                        if (stream_update->dsc_config)
                                dp_update_dsc_config(pipe_ctx);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (stream_update->mst_bw_update) {
                                if (stream_update->mst_bw_update->is_increase)
                                        dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
                                else
                                        dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
                        }
-#endif
 
                        if (stream_update->pending_test_pattern) {
                                dc_link_dp_set_test_pattern(stream->link,
index b5e570d33ca947d1d4eb97190f682f0e56356801..b1718600fa0269c8fbc834e607d3299c4230088a 100644 (file)
@@ -720,35 +720,8 @@ static bool detect_dp(struct dc_link *link,
                sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
                if (!detect_dp_sink_caps(link))
                        return false;
-               if (is_mst_supported(link)) {
-                       sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
-                       link->type = dc_connection_mst_branch;
 
-                       dal_ddc_service_set_transaction_type(link->ddc,
-                                                            sink_caps->transaction_type);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-                       /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
-                        * reports DSC support.
-                        */
-                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
-                                       link->type == dc_connection_mst_branch &&
-                                       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
-                                       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
-                                       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
-                               link->wa_flags.dpia_mst_dsc_always_on = true;
-#endif
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-                       /* In case of fallback to SST when topology discovery below fails
-                        * HDCP caps will be querried again later by the upper layer (caller
-                        * of this function). */
-                       query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link);
-#endif
-               }
-
-               if (link->type != dc_connection_mst_branch &&
-                               is_dp_branch_device(link))
+               if (is_dp_branch_device(link))
                        /* DP SST branch */
                        link->type = dc_connection_sst_branch;
        } else {
@@ -824,15 +797,213 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
        return false;
 }
 
-/*
- * dc_link_detect() - Detect if a sink is attached to a given link
+static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+        * reports DSC support.
+        */
+       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+                       link->type == dc_connection_mst_branch &&
+                       link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                       link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 &&
+                       link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+                       !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+               link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+}
+
+static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+       /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+               link->wa_flags.dpia_mst_dsc_always_on = false;
+}
+
+static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
+{
+       DC_LOGGER_INIT(link->ctx->logger);
+
+       LINK_INFO("link=%d, mst branch is now Connected\n",
+                 link->link_index);
+
+       apply_dpia_mst_dsc_always_on_wa(link);
+       link->type = dc_connection_mst_branch;
+       dm_helpers_dp_update_branch_info(link->ctx, link);
+       if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
+                       link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
+               link_disconnect_sink(link);
+       } else {
+               link->type = dc_connection_sst_branch;
+       }
+
+       return link->type == dc_connection_mst_branch;
+}
+
+static bool reset_cur_dp_mst_topology(struct dc_link *link)
+{
+       bool result = false;
+       DC_LOGGER_INIT(link->ctx->logger);
+
+       LINK_INFO("link=%d, mst branch is now Disconnected\n",
+                 link->link_index);
+
+       revert_dpia_mst_dsc_always_on_wa(link);
+       result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+
+       link->mst_stream_alloc_table.stream_count = 0;
+       memset(link->mst_stream_alloc_table.stream_allocations,
+                       0,
+                       sizeof(link->mst_stream_alloc_table.stream_allocations));
+       return result;
+}
+
+static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
+               enum dc_detect_reason reason)
+{
+       int i;
+       bool can_apply_seamless_boot = false;
+
+       for (i = 0; i < dc->current_state->stream_count; i++) {
+               if (dc->current_state->streams[i]->apply_seamless_boot_optimization) {
+                       can_apply_seamless_boot = true;
+                       break;
+               }
+       }
+
+       return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT;
+}
+
+static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       dc_z10_restore(dc);
+#endif
+       clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+}
+
+static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc)
+{
+       clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+}
+
+static void set_all_streams_dpms_off_for_link(struct dc_link *link)
+{
+       int i;
+       struct pipe_ctx *pipe_ctx;
+       struct dc_stream_update stream_update;
+       bool dpms_off = true;
+       struct link_resource link_res = {0};
+
+       memset(&stream_update, 0, sizeof(stream_update));
+       stream_update.dpms_off = &dpms_off;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+               if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+                               pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+                       stream_update.stream = pipe_ctx->stream;
+                       dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
+                                       pipe_ctx->stream, &stream_update,
+                                       link->ctx->dc->current_state);
+               }
+       }
+
+       /* The link can also be enabled by VBIOS. In that case it is not recorded
+        * in pipe_ctx. Disable the link PHY here to make sure it is completely off
+        */
+       dp_disable_link_phy(link, &link_res, link->connector_signal);
+}
+
+static void verify_link_capability_destructive(struct dc_link *link,
+               struct dc_sink *sink,
+               enum dc_detect_reason reason)
+{
+       bool should_prepare_phy_clocks =
+                       should_prepare_phy_clocks_for_link_verification(link->dc, reason);
+
+       if (should_prepare_phy_clocks)
+               prepare_phy_clocks_for_destructive_link_verification(link->dc);
+
+       if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+               struct dc_link_settings known_limit_link_setting =
+                               dp_get_max_link_cap(link);
+               set_all_streams_dpms_off_for_link(link);
+               dp_verify_link_cap_with_retries(
+                               link, &known_limit_link_setting,
+                               LINK_TRAINING_MAX_VERIFY_RETRY);
+       } else {
+               ASSERT(0);
+       }
+
+       if (should_prepare_phy_clocks)
+               restore_phy_clocks_for_destructive_link_verification(link->dc);
+}
+
+static void verify_link_capability_non_destructive(struct dc_link *link)
+{
+       if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+               if (dc_is_embedded_signal(link->local_sink->sink_signal) ||
+                               link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+                       /* TODO - should we check link encoder's max link caps here?
+                        * How do we know which link encoder to check?
+                        */
+                       link->verified_link_cap = link->reported_link_cap;
+               else
+                       link->verified_link_cap = dp_get_max_link_cap(link);
+       }
+}
+
+static bool should_verify_link_capability_destructively(struct dc_link *link,
+               enum dc_detect_reason reason)
+{
+       bool destructive = false;
+       struct dc_link_settings max_link_cap;
+       bool is_link_enc_unavailable = link->link_enc &&
+                       link->dc->res_pool->funcs->link_encs_assign &&
+                       !link_enc_cfg_is_link_enc_avail(
+                                       link->ctx->dc,
+                                       link->link_enc->preferred_engine,
+                                       link);
+
+       if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+               max_link_cap = dp_get_max_link_cap(link);
+               destructive = true;
+
+               if (link->dc->debug.skip_detection_link_training ||
+                               dc_is_embedded_signal(link->local_sink->sink_signal) ||
+                               link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+                       destructive = false;
+               } else if (dp_get_link_encoding_format(&max_link_cap) ==
+                               DP_8b_10b_ENCODING) {
+                       if (link->dpcd_caps.is_mst_capable ||
+                                       is_link_enc_unavailable) {
+                               destructive = false;
+                       }
+               }
+       }
+
+       return destructive;
+}
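In prose, the predicate above selects destructive verification (full link training at detect time) for DP sinks unless one of the bail-outs applies; summarized for review:

/*
 * Destructive verification applies to DP sinks, except when:
 *  - debug.skip_detection_link_training is set, or
 *  - the signal is embedded (eDP), or
 *  - the endpoint is a USB4 DPIA, or
 *  - the max link cap uses 8b/10b encoding and either the sink is
 *    MST-capable or no link encoder is currently available for this link.
 */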
+
+static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
+               enum dc_detect_reason reason)
+{
+       if (should_verify_link_capability_destructively(link, reason))
+               verify_link_capability_destructive(link, sink, reason);
+       else
+               verify_link_capability_non_destructive(link);
+}
+
+
+/**
+ * detect_link_and_local_sink() - Detect if a sink is attached to a given link
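+ * @link: link to probe for a sink
+ * @reason: caller's reason for this detection pass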
  *
  * link->local_sink is created or destroyed as needed.
  *
- * This does not create remote sinks but will trigger DM
- * to start MST detection if a branch is detected.
+ * This does not create remote sinks.
  */
-static bool dc_link_detect_helper(struct dc_link *link,
+static bool detect_link_and_local_sink(struct dc_link *link,
                                  enum dc_detect_reason reason)
 {
        struct dc_sink_init_data sink_init_data = { 0 };
@@ -848,9 +1019,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
        struct dpcd_caps prev_dpcd_caps;
        enum dc_connection_type new_connection_type = dc_connection_none;
        enum dc_connection_type pre_connection_type = dc_connection_none;
-       bool perform_dp_seamless_boot = false;
        const uint32_t post_oui_delay = 30; // 30ms
-       struct link_resource link_res = { 0 };
 
        DC_LOGGER_INIT(link->ctx->logger);
 
@@ -862,7 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
                (!link->dc->config.allow_edp_hotplug_detection)) &&
                link->local_sink) {
                // need to re-write OUI and brightness in resume case
-               if (link->connector_signal == SIGNAL_TYPE_EDP) {
+               if (link->connector_signal == SIGNAL_TYPE_EDP &&
+                       (link->dpcd_sink_ext_caps.bits.oled == 1)) {
                        dpcd_set_source_specific_data(link);
                        msleep(post_oui_delay);
                        dc_link_set_default_brightness_aux(link);
@@ -943,61 +1113,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
                                return false;
                        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-                       if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
-                               link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt(
-                                               &link->dc->current_state->res_ctx,
-                                               link->dc->res_pool,
-                                               link);
-#endif
-
-                       if (link->type == dc_connection_mst_branch) {
-                               LINK_INFO("link=%d, mst branch is now Connected\n",
-                                         link->link_index);
-                               /* Need to setup mst link_cap struct here
-                                * otherwise dc_link_detect() will leave mst link_cap
-                                * empty which leads to allocate_mst_payload() has "0"
-                                * pbn_per_slot value leading to exception on dc_fixpt_div()
-                                */
-                               dp_verify_mst_link_cap(link, &link_res);
-
-                               /*
-                                * This call will initiate MST topology discovery. Which
-                                * will detect MST ports and add new DRM connector DRM
-                                * framework. Then read EDID via remote i2c over aux. In
-                                * the end, will notify DRM detect result and save EDID
-                                * into DRM framework.
-                                *
-                                * .detect is called by .fill_modes.
-                                * .fill_modes is called by user mode ioctl
-                                * DRM_IOCTL_MODE_GETCONNECTOR.
-                                *
-                                * .get_modes is called by .fill_modes.
-                                *
-                                * call .get_modes, AMDGPU DM implementation will create
-                                * new dc_sink and add to dc_link. For long HPD plug
-                                * in/out, MST has its own handle.
-                                *
-                                * Therefore, just after dc_create, link->sink is not
-                                * created for MST until user mode app calls
-                                * DRM_IOCTL_MODE_GETCONNECTOR.
-                                *
-                                * Need check ->sink usages in case ->sink = NULL
-                                * TODO: s3 resume check
-                                */
-
-                               dm_helpers_dp_update_branch_info(link->ctx, link);
-                               if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
-                                               link, reason == DETECT_REASON_BOOT)) {
-                                       if (prev_sink)
-                                               dc_sink_release(prev_sink);
-                                       return false;
-                               } else {
-                                       link->type = dc_connection_sst_branch;
-                                       sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT;
-                               }
-                       }
-
                        /* Active SST downstream branch device unplug*/
                        if (link->type == dc_connection_sst_branch &&
                            link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
@@ -1018,19 +1133,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
                        if (pre_connection_type == dc_connection_mst_branch &&
                                        link->type != dc_connection_mst_branch)
                                dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-
-
-                       // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
-                       if (reason == DETECT_REASON_BOOT &&
-                                       !dc_ctx->dc->config.power_down_display_on_boot &&
-                                       link->link_status.link_active)
-                               perform_dp_seamless_boot = true;
-
-                       if (perform_dp_seamless_boot) {
-                               read_current_link_settings_on_detect(link);
-                               link->verified_link_cap = link->reported_link_cap;
-                       }
-
                        break;
                }
 
@@ -1119,13 +1221,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
                        query_hdcp_capability(sink->sink_signal, link);
 #endif
-
-                       // verify link cap for SST non-seamless boot
-                       if (!perform_dp_seamless_boot)
-                               dp_verify_link_cap_with_retries(link,
-                                                               &link_res,
-                                                               &link->reported_link_cap,
-                                                               LINK_TRAINING_MAX_VERIFY_RETRY);
                } else {
                        // If edid is the same, then discard new sink and revert back to original sink
                        if (same_edid) {
@@ -1185,27 +1280,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
                }
        } else {
                /* From Connected-to-Disconnected. */
-               if (link->type == dc_connection_mst_branch) {
-                       LINK_INFO("link=%d, mst branch is now Disconnected\n",
-                                 link->link_index);
-
-                       /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
-                       if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
-                               link->wa_flags.dpia_mst_dsc_always_on = false;
-
-                       dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-
-                       link->mst_stream_alloc_table.stream_count = 0;
-                       memset(link->mst_stream_alloc_table.stream_allocations,
-                              0,
-                              sizeof(link->mst_stream_alloc_table.stream_allocations));
-               }
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
-                       reset_dp_hpo_stream_encoders_for_link(link);
-#endif
-
                link->type = dc_connection_none;
                sink_caps.signal = SIGNAL_TYPE_NONE;
                /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
@@ -1214,6 +1288,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
                 *  Clear dongle_max_pix_clk on disconnect to fix this
                 */
                link->dongle_max_pix_clk = 0;
+
+               dc_link_dp_clear_rx_status(link);
        }
 
        LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
@@ -1230,33 +1306,26 @@ static bool dc_link_detect_helper(struct dc_link *link,
 
 bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 {
-       const struct dc *dc = link->dc;
-       bool ret;
-       bool can_apply_seamless_boot = false;
-       int i;
+       bool is_local_sink_detect_success;
+       bool is_delegated_to_mst_top_mgr = false;
+       enum dc_connection_type pre_link_type = link->type;
 
-       for (i = 0; i < dc->current_state->stream_count; i++) {
-               if (dc->current_state->streams[i]->apply_seamless_boot_optimization) {
-                       can_apply_seamless_boot = true;
-                       break;
-               }
-       }
+       is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       dc_z10_restore(dc);
-#endif
-
-       /* get out of low power state */
-       if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
-               clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+       if (is_local_sink_detect_success && link->local_sink)
+               verify_link_capability(link, link->local_sink, reason);
 
-       ret = dc_link_detect_helper(link, reason);
+       if (is_local_sink_detect_success && link->local_sink &&
+                       dc_is_dp_signal(link->local_sink->sink_signal) &&
+                       link->dpcd_caps.is_mst_capable)
+               is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
 
-       /* Go back to power optimized state */
-       if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
-               clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+       if (is_local_sink_detect_success &&
+                       pre_link_type == dc_connection_mst_branch &&
+                       link->type != dc_connection_mst_branch)
+               is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link);
 
-       return ret;
+       return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr;
 }
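
The rewritten dc_link_detect() above now reads as a three-stage pipeline: detect the local sink, verify link capability, then either hand a DP MST-capable sink to the topology manager or tear down a stale MST topology. A minimal, self-contained sketch of that control flow (stub helpers stand in for detect_link_and_local_sink()/discover_dp_mst_topology(); this is not the driver code) makes the return value explicit:

#include <stdbool.h>
#include <stdio.h>

/* Stubs modeling the helpers the diff introduces; values assumed. */
static bool detect_link_and_local_sink_stub(void) { return true; }
static bool sink_is_mst_capable_stub(void)        { return true; }
static bool discover_dp_mst_topology_stub(void)   { return true; }

int main(void)
{
        bool detect_ok = detect_link_and_local_sink_stub();
        bool delegated = false;

        if (detect_ok && sink_is_mst_capable_stub())
                delegated = discover_dp_mst_topology_stub();

        /* true only while the caller still owns a locally detected
         * sink; an MST hand-off intentionally reports false */
        printf("dc_link_detect -> %s\n",
               detect_ok && !delegated ? "true" : "false");
        return 0;
}
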
 
 bool dc_link_get_hpd_state(struct dc_link *dc_link)
@@ -1586,9 +1655,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
        }
 
        DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
-#endif
 
        /* Update link encoder tracking variables. These are used for the dynamic
         * assignment of link encoders to streams.
@@ -1871,7 +1938,6 @@ static enum dc_status enable_link_dp(struct dc_state *state,
        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
                do_fallback = true;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /*
         * Temporary w/a to get DP2.0 link rates to work with SST.
         * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
@@ -1881,7 +1947,6 @@ static enum dc_status enable_link_dp(struct dc_state *state,
                        link->dc->debug.set_mst_en_for_sst) {
                dp_enable_mst_on_sink(link, true);
        }
-#endif
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
                /*in case it is not on*/
@@ -1889,7 +1954,6 @@ static enum dc_status enable_link_dp(struct dc_state *state,
                link->dc->hwss.edp_wait_for_hpd_ready(link, true);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
                /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
        } else {
@@ -1899,19 +1963,15 @@ static enum dc_status enable_link_dp(struct dc_state *state,
                        state->clk_mgr->funcs->update_clocks(state->clk_mgr,
                                        state, false);
        }
-#else
-       pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
-               link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
-       if (state->clk_mgr && !apply_seamless_boot_optimization)
-               state->clk_mgr->funcs->update_clocks(state->clk_mgr,
-                                                                                       state, false);
-#endif
 
        // during mode switch we do DP_SET_POWER off then on, and OUI is lost
        dpcd_set_source_specific_data(link);
        if (link->dpcd_sink_ext_caps.raw != 0)
                msleep(post_oui_delay);
 
+       // similarly, mode switch can cause loss of cable ID
+       dpcd_update_cable_id(link);
+
        skip_video_pattern = true;
 
        if (link_settings.link_rate == LINK_RATE_LOW)
@@ -1934,12 +1994,8 @@ static enum dc_status enable_link_dp(struct dc_state *state,
        else
                fec_enable = true;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
                dp_set_fec_enable(link, fec_enable);
-#else
-       dp_set_fec_enable(link, fec_enable);
-#endif
 
        // during mode set we do DP_SET_POWER off then on, aux writes are lost
        if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2495,9 +2551,7 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
 
        if (dc_is_dp_signal(signal)) {
                /* SST DP, eDP */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                struct dc_link_settings link_settings = link->cur_link_settings;
-#endif
                if (dc_is_dp_sst_signal(signal))
                        dp_disable_link_phy(link, link_res, signal);
                else
@@ -2505,15 +2559,10 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_
 
                if (dc_is_dp_sst_signal(signal) ||
                                link->mst_stream_alloc_table.stream_count == 0) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
                                dp_set_fec_enable(link, false);
                                dp_set_fec_ready(link, link_res, false);
                        }
-#else
-                       dp_set_fec_enable(link, false);
-                       dp_set_fec_ready(link, link_res, false);
-#endif
                }
        } else {
                if (signal != SIGNAL_TYPE_VIRTUAL)
@@ -2696,72 +2745,63 @@ static bool dp_active_dongle_validate_timing(
                break;
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
                        dongle_caps->extendedCapValid == true) {
-#else
-       if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
-               dongle_caps->extendedCapValid == false)
-               return true;
-#endif
-
-       /* Check Pixel Encoding */
-       switch (timing->pixel_encoding) {
-       case PIXEL_ENCODING_RGB:
-       case PIXEL_ENCODING_YCBCR444:
-               break;
-       case PIXEL_ENCODING_YCBCR422:
-               if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
-                       return false;
-               break;
-       case PIXEL_ENCODING_YCBCR420:
-               if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+               /* Check Pixel Encoding */
+               switch (timing->pixel_encoding) {
+               case PIXEL_ENCODING_RGB:
+               case PIXEL_ENCODING_YCBCR444:
+                       break;
+               case PIXEL_ENCODING_YCBCR422:
+                       if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+                               return false;
+                       break;
+               case PIXEL_ENCODING_YCBCR420:
+                       if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+                               return false;
+                       break;
+               default:
+                       /* Invalid Pixel Encoding*/
                        return false;
-               break;
-       default:
-               /* Invalid Pixel Encoding*/
-               return false;
-       }
+               }
 
-       switch (timing->display_color_depth) {
-       case COLOR_DEPTH_666:
-       case COLOR_DEPTH_888:
-               /*888 and 666 should always be supported*/
-               break;
-       case COLOR_DEPTH_101010:
-               if (dongle_caps->dp_hdmi_max_bpc < 10)
-                       return false;
-               break;
-       case COLOR_DEPTH_121212:
-               if (dongle_caps->dp_hdmi_max_bpc < 12)
+               switch (timing->display_color_depth) {
+               case COLOR_DEPTH_666:
+               case COLOR_DEPTH_888:
+                       /*888 and 666 should always be supported*/
+                       break;
+               case COLOR_DEPTH_101010:
+                       if (dongle_caps->dp_hdmi_max_bpc < 10)
+                               return false;
+                       break;
+               case COLOR_DEPTH_121212:
+                       if (dongle_caps->dp_hdmi_max_bpc < 12)
+                               return false;
+                       break;
+               case COLOR_DEPTH_141414:
+               case COLOR_DEPTH_161616:
+               default:
+                       /* These color depths are currently not supported */
                        return false;
-               break;
-       case COLOR_DEPTH_141414:
-       case COLOR_DEPTH_161616:
-       default:
-               /* These color depths are currently not supported */
-               return false;
-       }
+               }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
-               struct dc_crtc_timing outputTiming = *timing;
+               if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
+                       struct dc_crtc_timing outputTiming = *timing;
 
-               if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
-                       /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
-                       outputTiming.flags.DSC = 0;
-               if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
-                       return false;
-       } else { // DP to HDMI TMDS converter
+                       if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
+                               /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
+                               outputTiming.flags.DSC = 0;
+                       if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+                               return false;
+               } else { // DP to HDMI TMDS converter
+                       if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
+                               return false;
+               }
+#else
                if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
                        return false;
-       }
-#else
-       if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
-               return false;
 #endif
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        }
 
        if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
@@ -2842,7 +2882,6 @@ static bool dp_active_dongle_validate_timing(
                                return false;
                }
        }
-#endif
 
        return true;
 }
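
The re-indented block above keeps the same three checks for a DP-to-HDMI converter: pixel-encoding pass-through support, maximum bits per component, and either FRL bandwidth or TMDS pixel clock. A compact, self-contained model of the bpc and clock/bandwidth checks (the struct is a simplified stand-in, not the real dc_dongle_caps layout):

#include <stdbool.h>
#include <stdint.h>

struct dongle_caps_model {
        int      max_bpc;               /* dp_hdmi_max_bpc           */
        uint32_t max_pixel_clk_khz;     /* TMDS converter limit      */
        uint32_t frl_max_link_bw_kbps;  /* 0 => TMDS, >0 => FRL path */
};

static bool dongle_timing_ok(const struct dongle_caps_model *caps,
                             int bpc, uint32_t pixel_clk_100hz,
                             uint32_t stream_bw_kbps)
{
        if (bpc > caps->max_bpc)
                return false;
        if (caps->frl_max_link_bw_kbps > 0)     /* DP -> HDMI FRL  */
                return stream_bw_kbps <= caps->frl_max_link_bw_kbps;
        /* DP -> HDMI TMDS: timing clock is in 100 Hz, cap in kHz */
        return pixel_clk_100hz <= caps->max_pixel_clk_khz * 10;
}

int main(void)
{
        const struct dongle_caps_model tmds = {
                .max_bpc = 12, .max_pixel_clk_khz = 600000,
        };
        /* 4k60 RGB 8bpc: ~594 MHz pixel clock = 5940000 x 100 Hz */
        return dongle_timing_ok(&tmds, 8, 5940000, 0) ? 0 : 1;
}
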
@@ -3315,9 +3354,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
 static void update_mst_stream_alloc_table(
        struct dc_link *link,
        struct stream_encoder *stream_enc,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
-#endif
        const struct dp_mst_stream_allocation_table *proposed_table)
 {
        struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
@@ -3353,9 +3390,7 @@ static void update_mst_stream_alloc_table(
                        work_table[i].slot_count =
                                proposed_table->stream_allocations[i].slot_count;
                        work_table[i].stream_enc = stream_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
-#endif
                }
        }
 
@@ -3366,7 +3401,7 @@ static void update_mst_stream_alloc_table(
                link->mst_stream_alloc_table.stream_allocations[i] =
                                work_table[i];
 }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
 static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
 {
        const uint32_t VCP_Y_PRECISION = 1000;
@@ -3402,6 +3437,8 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
        struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
        struct link_mst_stream_allocation_table proposed_table = {0};
        struct fixed31_32 avg_time_slots_per_mtp;
+       const struct dc_link_settings empty_link_settings = {0};
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        DC_LOGGER_INIT(link->ctx->logger);
 
        /* slot X.Y for SST payload deallocate */
@@ -3410,10 +3447,13 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
 
                dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
 
-               hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
-                               hpo_dp_link_encoder,
-                               hpo_dp_stream_encoder->inst,
-                               avg_time_slots_per_mtp);
+               if (link_hwss->ext.set_throttled_vcp_size)
+                       link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+                                       avg_time_slots_per_mtp);
+               if (link_hwss->ext.set_hblank_min_symbol_width)
+                       link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+                                       &empty_link_settings,
+                                       avg_time_slots_per_mtp);
        }
 
        /* calculate VC payload and update branch with new payload allocation table*/
@@ -3457,10 +3497,13 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
 
                dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
 
-               hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
-                               hpo_dp_link_encoder,
-                               hpo_dp_stream_encoder->inst,
-                               avg_time_slots_per_mtp);
+               if (link_hwss->ext.set_throttled_vcp_size)
+                       link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+                                       avg_time_slots_per_mtp);
+               if (link_hwss->ext.set_hblank_min_symbol_width)
+                       link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+                                       &link->cur_link_settings,
+                                       avg_time_slots_per_mtp);
        }
 
        /* Always return DC_OK.
@@ -3468,7 +3511,6 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
         */
        return DC_OK;
 }
-#endif
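
dc_link_update_sst_payload() above also introduces the pattern used throughout the rest of this diff: hardware-specific behavior moves behind get_link_hwss(), whose ext callbacks are optional and therefore NULL-checked at every call site. A minimal sketch of that optional-hook idiom (the names are illustrative, not the dc link_hwss definition):

#include <stdio.h>

struct link_hwss_ext_model {
        void (*set_throttled_vcp_size)(int inst, double mtp);
        void (*set_hblank_min_symbol_width)(int inst, double mtp);
};

static void dio_set_vcp(int inst, double mtp)
{
        printf("inst %d: throttled vcp size %.3f\n", inst, mtp);
}

/* DIO (8b/10b) hardware has no hblank-width hook, so it stays NULL;
 * an HPO (128b/132b) variant would fill both slots. */
static const struct link_hwss_ext_model dio_ext = {
        .set_throttled_vcp_size = dio_set_vcp,
};

int main(void)
{
        if (dio_ext.set_throttled_vcp_size)
                dio_ext.set_throttled_vcp_size(0, 0.125);
        if (dio_ext.set_hblank_min_symbol_width)  /* NULL: skipped */
                dio_ext.set_hblank_min_symbol_width(0, 0.125);
        return 0;
}
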
 
 /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
  * because stream_encoder is not exposed to dm
@@ -3478,24 +3520,17 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->link;
        struct link_encoder *link_encoder = NULL;
-       struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
-       struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
-#endif
        struct dp_mst_stream_allocation_table proposed_table = {0};
        struct fixed31_32 avg_time_slots_per_mtp;
        struct fixed31_32 pbn;
        struct fixed31_32 pbn_per_slot;
        int i;
        enum act_return_status ret;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        DC_LOGGER_INIT(link->ctx->logger);
 
-       /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
-       if (link->ep_type == DISPLAY_ENDPOINT_PHY)
-               link_encoder = link->link_enc;
-       else if (link->dc->res_pool->funcs->link_encs_assign)
-               link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream);
+       link_encoder = link_enc_cfg_get_link_enc(link);
        ASSERT(link_encoder);
 
        /* enable_link_dp_mst already check link->enabled_stream_count
@@ -3508,17 +3543,12 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
                stream->ctx,
                stream,
                &proposed_table,
-               true)) {
+               true))
                update_mst_stream_alloc_table(
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                                        link,
                                        pipe_ctx->stream_res.stream_enc,
                                        pipe_ctx->stream_res.hpo_dp_stream_enc,
                                        &proposed_table);
-#else
-                                       link, pipe_ctx->stream_res.stream_enc, &proposed_table);
-#endif
-       }
        else
                DC_LOG_WARNING("Failed to update"
                                "MST allocation table for"
@@ -3531,7 +3561,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
                        link->mst_stream_alloc_table.stream_count);
 
        for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                DC_LOG_MST("stream_enc[%d]: %p      "
                "stream[%d].hpo_dp_stream_enc: %p      "
                "stream[%d].vcp_id: %d      "
@@ -3544,17 +3573,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
                link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
                i,
                link->mst_stream_alloc_table.stream_allocations[i].slot_count);
-#else
-               DC_LOG_MST("stream_enc[%d]: %p      "
-               "stream[%d].vcp_id: %d      "
-               "stream[%d].slot_count: %d\n",
-               i,
-               (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
-               i,
-               link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
-               i,
-               link->mst_stream_alloc_table.stream_allocations[i].slot_count);
-#endif
        }
 
        ASSERT(proposed_table.stream_count > 0);
@@ -3574,7 +3592,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
        }
 
        /* program DP source TX for payload */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
        case DP_8b_10b_ENCODING:
                link_encoder->funcs->update_mst_stream_allocation_table(
@@ -3590,11 +3607,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
                DC_LOG_ERROR("Failure: unknown encoding format\n");
                return DC_ERROR_UNEXPECTED;
        }
-#else
-       link_encoder->funcs->update_mst_stream_allocation_table(
-               link_encoder,
-               &link->mst_stream_alloc_table);
-#endif
 
        /* send down message */
        ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3617,34 +3629,19 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
        pbn = get_pbn_from_timing(pipe_ctx);
        avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
-       case DP_8b_10b_ENCODING:
-               stream_encoder->funcs->set_throttled_vcp_size(
-                       stream_encoder,
-                       avg_time_slots_per_mtp);
-               break;
-       case DP_128b_132b_ENCODING:
-               hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
-                               hpo_dp_link_encoder,
-                               hpo_dp_stream_encoder->inst,
+       dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+       if (link_hwss->ext.set_throttled_vcp_size)
+               link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+       if (link_hwss->ext.set_hblank_min_symbol_width)
+               link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+                               &link->cur_link_settings,
                                avg_time_slots_per_mtp);
-               break;
-       case DP_UNKNOWN_ENCODING:
-               DC_LOG_ERROR("Failure: unknown encoding format\n");
-               return DC_ERROR_UNEXPECTED;
-       }
-#else
-       stream_encoder->funcs->set_throttled_vcp_size(
-               stream_encoder,
-               avg_time_slots_per_mtp);
-#endif
 
        return DC_OK;
 
 }
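
The payload math above is fixed-point throughout: avg_time_slots_per_mtp = PBN / PBN-per-slot, computed with dc_fixpt_div() on 31.32 values. A toy version using 16.16 fixed point shows the shape of that division (the PBN figures are assumed for illustration):

#include <stdint.h>
#include <stdio.h>

typedef int64_t fx16_16;
#define FX_ONE (1LL << 16)

static fx16_16 fx_div(fx16_16 a, fx16_16 b)
{
        return (a * FX_ONE) / b;        /* keep fractional quotient */
}

int main(void)
{
        fx16_16 pbn          = 2520 * FX_ONE;   /* stream payload    */
        fx16_16 pbn_per_slot = 60 * FX_ONE;     /* link PBN per slot */
        fx16_16 slots = fx_div(pbn, pbn_per_slot);

        printf("avg time slots per MTP = %lld.%04lld\n",
               (long long)(slots >> 16),
               (long long)(((slots & 0xffff) * 10000) >> 16));
        return 0;       /* prints 42.0000 for these inputs */
}
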
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
 {
        struct dc_stream_state *stream = pipe_ctx->stream;
@@ -3653,10 +3650,10 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
        struct fixed31_32 pbn;
        struct fixed31_32 pbn_per_slot;
        struct link_encoder *link_encoder = link->link_enc;
-       struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
        struct dp_mst_stream_allocation_table proposed_table = {0};
        uint8_t i;
        enum act_return_status ret;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        DC_LOGGER_INIT(link->ctx->logger);
 
        /* decrease throttled vcp size */
@@ -3664,8 +3661,11 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
        pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
        avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
 
-       stream_encoder->funcs->set_throttled_vcp_size(
-                               stream_encoder,
+       if (link_hwss->ext.set_throttled_vcp_size)
+               link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+       if (link_hwss->ext.set_hblank_min_symbol_width)
+               link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+                               &link->cur_link_settings,
                                avg_time_slots_per_mtp);
 
        /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
@@ -3733,10 +3733,10 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t
        struct fixed31_32 pbn;
        struct fixed31_32 pbn_per_slot;
        struct link_encoder *link_encoder = link->link_enc;
-       struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
        struct dp_mst_stream_allocation_table proposed_table = {0};
        uint8_t i;
        enum act_return_status ret;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        DC_LOGGER_INIT(link->ctx->logger);
 
        /* notify immediate branch device table update */
@@ -3795,35 +3795,31 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t
        pbn_per_slot = get_pbn_per_slot(stream);
        avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
 
-       stream_encoder->funcs->set_throttled_vcp_size(
-                               stream_encoder,
+       if (link_hwss->ext.set_throttled_vcp_size)
+               link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+       if (link_hwss->ext.set_hblank_min_symbol_width)
+               link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+                               &link->cur_link_settings,
                                avg_time_slots_per_mtp);
 
        return DC_OK;
 }
-#endif
 
 static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
 {
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->link;
        struct link_encoder *link_encoder = NULL;
-       struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
-       struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
-#endif
        struct dp_mst_stream_allocation_table proposed_table = {0};
        struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
        int i;
        bool mst_mode = (link->type == dc_connection_mst_branch);
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+       const struct dc_link_settings empty_link_settings = {0};
        DC_LOGGER_INIT(link->ctx->logger);
 
-       /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
-       if (link->ep_type == DISPLAY_ENDPOINT_PHY)
-               link_encoder = link->link_enc;
-       else if (link->dc->res_pool->funcs->link_encs_assign)
-               link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream);
+       link_encoder = link_enc_cfg_get_link_enc(link);
        ASSERT(link_encoder);
 
        /* deallocate_mst_payload is called before disable link. When mode or
@@ -3834,28 +3830,12 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
         */
 
        /* slot X.Y */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
-       case DP_8b_10b_ENCODING:
-               stream_encoder->funcs->set_throttled_vcp_size(
-                       stream_encoder,
-                       avg_time_slots_per_mtp);
-               break;
-       case DP_128b_132b_ENCODING:
-               hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
-                               hpo_dp_link_encoder,
-                               hpo_dp_stream_encoder->inst,
+       if (link_hwss->ext.set_throttled_vcp_size)
+               link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+       if (link_hwss->ext.set_hblank_min_symbol_width)
+               link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+                               &empty_link_settings,
                                avg_time_slots_per_mtp);
-               break;
-       case DP_UNKNOWN_ENCODING:
-               DC_LOG_ERROR("Failure: unknown encoding format\n");
-               return DC_ERROR_UNEXPECTED;
-       }
-#else
-       stream_encoder->funcs->set_throttled_vcp_size(
-               stream_encoder,
-               avg_time_slots_per_mtp);
-#endif
 
        /* TODO: which component is responsible for remove payload table? */
        if (mst_mode) {
@@ -3865,16 +3845,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                                &proposed_table,
                                false)) {
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        update_mst_stream_alloc_table(
                                                link,
                                                pipe_ctx->stream_res.stream_enc,
                                                pipe_ctx->stream_res.hpo_dp_stream_enc,
                                                &proposed_table);
-#else
-                       update_mst_stream_alloc_table(
-                               link, pipe_ctx->stream_res.stream_enc, &proposed_table);
-#endif
                }
                else {
                                DC_LOG_WARNING("Failed to update"
@@ -3890,7 +3865,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                        link->mst_stream_alloc_table.stream_count);
 
        for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                DC_LOG_MST("stream_enc[%d]: %p      "
                "stream[%d].hpo_dp_stream_enc: %p      "
                "stream[%d].vcp_id: %d      "
@@ -3903,17 +3877,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
                i,
                link->mst_stream_alloc_table.stream_allocations[i].slot_count);
-#else
-               DC_LOG_MST("stream_enc[%d]: %p      "
-               "stream[%d].vcp_id: %d      "
-               "stream[%d].slot_count: %d\n",
-               i,
-               (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
-               i,
-               link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
-               i,
-               link->mst_stream_alloc_table.stream_allocations[i].slot_count);
-#endif
        }
 
        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
@@ -3930,7 +3893,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                                status, mst_alloc_slots, prev_mst_slots_in_use);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
        case DP_8b_10b_ENCODING:
                link_encoder->funcs->update_mst_stream_allocation_table(
@@ -3946,11 +3908,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
                DC_LOG_ERROR("Failure: unknown encoding format\n");
                return DC_ERROR_UNEXPECTED;
        }
-#else
-       link_encoder->funcs->update_mst_stream_allocation_table(
-               link_encoder,
-               &link->mst_stream_alloc_table);
-#endif
 
        if (mst_mode) {
                dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3979,13 +3936,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
        if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
                return;
 
-       if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
-               link_enc = pipe_ctx->stream->link->link_enc;
-       else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
-                       pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_stream(
-                               pipe_ctx->stream->ctx->dc,
-                               pipe_ctx->stream);
+       link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
        ASSERT(link_enc);
        if (link_enc == NULL)
                return;
@@ -3998,21 +3949,18 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 
        /* stream encoder index */
        config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx))
                config.stream_enc_idx =
                                pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
-#endif
 
        /* dig back end */
        config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
 
        /* link encoder index */
        config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx))
                config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-#endif
+
        /* dio output index */
        config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
 
@@ -4027,9 +3975,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
        config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
        config.mst_enabled = (pipe_ctx->stream->signal ==
                        SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
-#endif
        config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
                        1 : 0;
        config.dpms_off = dpms_off;
@@ -4041,7 +3987,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
 }
 #endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
 {
        struct dc *dc = pipe_ctx->stream->ctx->dc;
@@ -4050,15 +3995,17 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
        struct fixed31_32 avg_time_slots_per_mtp;
        uint8_t req_slot_count = 0;
        uint8_t vc_id = 1; /// VC ID always 1 for SST
-
        struct dc_link_settings link_settings = {0};
+       const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
        decide_link_settings(stream, &link_settings);
        stream->link->cur_link_settings = link_settings;
 
-       /*  Enable clock, Configure lane count, and Enable Link Encoder*/
-       enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings);
+       if (link_hwss->ext.enable_dp_link_output)
+               link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res,
+                               stream->signal, pipe_ctx->clock_source->id,
+                               &link_settings);
 
 #ifdef DIAGS_BUILD
        /* Workaround for FPGA HPO capture DP link data:
@@ -4112,16 +4059,11 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
                        pipe_ctx->link_res.hpo_dp_link_enc,
                        &proposed_table);
 
-       pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size(
-                       pipe_ctx->link_res.hpo_dp_link_enc,
-                       pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
-                       avg_time_slots_per_mtp);
-
-
+       if (link_hwss->ext.set_throttled_vcp_size)
+               link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
 
        dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
 }
-#endif
 
 void core_link_enable_stream(
                struct dc_state *state,
@@ -4132,31 +4074,23 @@ void core_link_enable_stream(
        struct dc_link *link = stream->sink->link;
        enum dc_status status;
        struct link_encoder *link_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
        struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
 
        if (is_dp_128b_132b_signal(pipe_ctx))
                vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
-#endif
+
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
 
        if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
                return;
 
-       if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY)
-               link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream);
-       else
-               link_enc = stream->link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
                        && !is_dp_128b_132b_signal(pipe_ctx)) {
-#else
-       if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
-#endif
                if (link_enc)
                        link_enc->funcs->setup(
                                link_enc,
@@ -4167,7 +4101,6 @@ void core_link_enable_stream(
                        stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx)) {
                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute(
                                pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -4185,14 +4118,6 @@ void core_link_enable_stream(
                                stream->use_vsc_sdp_for_colorimetry,
                                stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
        }
-#else
-       pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
-                       pipe_ctx->stream_res.stream_enc,
-                       &stream->timing,
-                       stream->output_color_space,
-                       stream->use_vsc_sdp_for_colorimetry,
-                       stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
-#endif
 
        if (dc_is_dp_signal(pipe_ctx->stream->signal))
                dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
@@ -4206,10 +4131,8 @@ void core_link_enable_stream(
 
        pipe_ctx->stream->link->link_state_valid = true;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
                pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
-#endif
 
        if (dc_is_dvi_signal(pipe_ctx->stream->signal))
                pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
@@ -4229,11 +4152,9 @@ void core_link_enable_stream(
 
                pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                // Enable VPG before building infoframe
                if (vpg && vpg->funcs->vpg_poweron)
                        vpg->funcs->vpg_poweron(vpg);
-#endif
 
                resource_build_info_frame(pipe_ctx);
                dc->hwss.update_info_frame(pipe_ctx);
@@ -4319,12 +4240,8 @@ void core_link_enable_stream(
                 * as a workaround for the incorrect value being applied
                 * from transmitter control.
                 */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
                                is_dp_128b_132b_signal(pipe_ctx)))
-#else
-               if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
-#endif
                        if (link_enc)
                                link_enc->funcs->setup(
                                        link_enc,
@@ -4343,11 +4260,9 @@ void core_link_enable_stream(
 
                if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_link_allocate_mst_payload(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
                                is_dp_128b_132b_signal(pipe_ctx))
                        dc_link_update_sst_payload(pipe_ctx, true);
-#endif
 
                dc->hwss.unblank_stream(pipe_ctx,
                        &pipe_ctx->stream->link->cur_link_settings);
@@ -4364,11 +4279,9 @@ void core_link_enable_stream(
                dc->hwss.enable_audio_stream(pipe_ctx);
 
        } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (is_dp_128b_132b_signal(pipe_ctx)) {
                        fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
                }
-#endif
                if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
                                dc_is_virtual_signal(pipe_ctx->stream->signal))
                        dp_set_dsc_enable(pipe_ctx, true);
@@ -4385,12 +4298,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
        struct dc  *dc = pipe_ctx->stream->ctx->dc;
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->sink->link;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
 
        if (is_dp_128b_132b_signal(pipe_ctx))
                vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
-#endif
 
        if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
                        dc_is_virtual_signal(pipe_ctx->stream->signal))
@@ -4410,11 +4321,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
 
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
                        is_dp_128b_132b_signal(pipe_ctx))
                dc_link_update_sst_payload(pipe_ctx, false);
-#endif
 
        if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
                struct ext_hdmi_settings settings = {0};
@@ -4441,7 +4350,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
                }
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
                        !is_dp_128b_132b_signal(pipe_ctx)) {
 
@@ -4458,27 +4366,18 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
                dc->hwss.disable_stream(pipe_ctx);
                disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
        }
-#else
-       disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
-
-       dc->hwss.disable_stream(pipe_ctx);
-#endif
 
        if (pipe_ctx->stream->timing.flags.DSC) {
                if (dc_is_dp_signal(pipe_ctx->stream->signal))
                        dp_set_dsc_enable(pipe_ctx, false);
        }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx)) {
                if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
                        pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
        }
-#endif
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (vpg && vpg->funcs->vpg_powerdown)
                vpg->funcs->vpg_powerdown(vpg);
-#endif
 }
 
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -4542,22 +4441,17 @@ void dc_link_set_drive_settings(struct dc *dc,
 {
 
        int i;
-       struct pipe_ctx *pipe = NULL;
-       const struct link_resource *link_res;
+       struct link_resource link_res;
 
-       link_res = dc_link_get_cur_link_res(link);
+       for (i = 0; i < dc->link_count; i++)
+               if (dc->links[i] == link)
+                       break;
 
-       for (i = 0; i < MAX_PIPES; i++) {
-               pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-               if (pipe->stream && pipe->stream->link) {
-                       if (pipe->stream->link == link)
-                               break;
-               }
-       }
-       if (pipe && link_res)
-               dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings);
-       else
+       if (i >= dc->link_count)
                ASSERT_CRITICAL(false);
+
+       dc_link_get_cur_link_res(link, &link_res);
+       dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings);
 }
 
 void dc_link_set_preferred_link_settings(struct dc *dc,
@@ -4617,11 +4511,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
 
        if (link_setting != NULL) {
                link->preferred_link_setting = *link_setting;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
                        /* TODO: add dc update for acquiring link res  */
                        skip_immediate_retrain = true;
-#endif
        } else {
                link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
                link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
@@ -4663,7 +4555,6 @@ uint32_t dc_link_bandwidth_kbps(
        const struct dc_link *link,
        const struct dc_link_settings *link_setting)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        uint32_t total_data_bw_efficiency_x10000 = 0;
        uint32_t link_rate_per_lane_kbps = 0;
 
@@ -4694,40 +4585,6 @@ uint32_t dc_link_bandwidth_kbps(
 
        /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
        return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
-#else
-       uint32_t link_bw_kbps =
-               link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
-
-       link_bw_kbps *= 8;   /* 8 bits per byte*/
-       link_bw_kbps *= link_setting->lane_count;
-
-       if (dc_link_should_enable_fec(link)) {
-               /* Account for FEC overhead.
-                * We have to do it based on caps,
-                * and not based on FEC being set ready,
-                * because FEC is set ready too late in
-                * the process to correctly be picked up
-                * by mode enumeration.
-                *
-                * There's enough zeros at the end of 'kbps'
-                * that make the below operation 100% precise
-                * for our purposes.
-                * 'long long' makes it work even for HDMI 2.1
-                * max bandwidth (and much, much bigger bandwidths
-                * than that, actually).
-                *
-                * NOTE: Reducing link BW by 3% may not be precise
-                * because it may be a stream BT that increases by 3%, and so
-                * 1/1.03 = 0.970873 factor should have been used instead,
-                * but the difference is minimal and is in a safe direction,
-                * which all works well around potential ambiguity of DP 1.4a spec.
-                */
-               long long fec_link_bw_kbps = link_bw_kbps * 970LL;
-               link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
-       }
-       return link_bw_kbps;
-
-#endif
 }
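
With the legacy #else path gone, dc_link_bandwidth_kbps() always computes rate-per-lane x lane count x efficiency, dividing by 10000 before multiplying so the intermediate stays inside 32 bits. A worked, self-contained version for HBR3 x4 with 80% 8b/10b efficiency (constants as commonly defined; treat the numbers as illustrative, not driver output):

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u  /* DPCD rate unit: 0.27 GHz */
#define BITS_PER_DP_BYTE          10u     /* 8b/10b line symbol width */

int main(void)
{
        uint32_t link_rate  = 0x1E;  /* HBR3: 30 x 0.27 = 8.1 Gbps */
        uint32_t lane_count = 4;
        uint32_t eff_x10000 = 8000;  /* 80.00%; ~7760 with FEC on  */

        uint32_t per_lane_kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ
                               * BITS_PER_DP_BYTE;      /* 8,100,000 */

        /* divide by 10000 first so the product stays within 32 bits */
        uint32_t bw_kbps = per_lane_kbps * lane_count / 10000 * eff_x10000;

        printf("effective bandwidth = %u kbps\n", bw_kbps); /* 25,920,000 */
        return 0;
}
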
 
 const struct dc_link_settings *dc_link_get_link_cap(
@@ -4752,16 +4609,7 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
         */
        struct link_encoder *link_enc = NULL;
 
-       /* Links supporting dynamically assigned link encoder will be assigned next
-        * available encoder if one not already assigned.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign) {
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-               if (link_enc == NULL)
-                       link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
-       } else
-               link_enc = link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
        return (dc_is_dp_signal(link->connector_signal) && link_enc &&
@@ -4845,23 +4693,24 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 
 }
 
-const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link)
+void dc_link_get_cur_link_res(const struct dc_link *link,
+               struct link_resource *link_res)
 {
        int i;
        struct pipe_ctx *pipe = NULL;
-       const struct link_resource *link_res = NULL;
+
+       memset(link_res, 0, sizeof(*link_res));
 
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
                        if (pipe->stream->link == link) {
-                               link_res = &pipe->link_res;
+                               *link_res = pipe->link_res;
                                break;
                        }
                }
        }
 
-       return link_res;
 }
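
dc_link_get_cur_link_res() changes from returning a pointer into dc->current_state to copying into caller-provided storage, presumably so the result cannot dangle when that state is swapped out. A small sketch of the copy-out pattern (model struct, not the real link_resource):

#include <stdio.h>
#include <string.h>

struct link_resource_model { int hpo_dp_link_enc_inst; };

static void get_cur_link_res_model(const struct link_resource_model *cur,
                                   struct link_resource_model *out)
{
        memset(out, 0, sizeof(*out));   /* no match => zeroed result */
        if (cur)
                *out = *cur;            /* snapshot, not a reference */
}

int main(void)
{
        struct link_resource_model state = { .hpo_dp_link_enc_inst = 1 };
        struct link_resource_model snap;

        get_cur_link_res_model(&state, &snap);
        state.hpo_dp_link_enc_inst = 0; /* later state change...     */
        printf("snapshot still sees inst %d\n", snap.hpo_dp_link_enc_inst);
        return 0;
}
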
 
 /**
@@ -4883,9 +4732,8 @@ const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link)
  */
 void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dc_link *link;
-       uint8_t i;
+       uint32_t i;
        uint32_t hpo_dp_recycle_map = 0;
 
        *map = 0;
@@ -4903,7 +4751,6 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
                }
                *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
        }
-#endif
 }
 
 /**
@@ -4926,9 +4773,8 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
  */
 void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dc_link *link;
-       uint8_t i;
+       uint32_t i;
        unsigned int available_hpo_dp_count;
        uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
                        >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
@@ -4964,5 +4810,4 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
                        }
                }
        }
-#endif
 }
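
Both helpers above round-trip per-link HPO DP encoder usage through a 32-bit map using the LINK_RES_HPO_DP_REC_MAP__SHIFT/__MASK pair. A self-contained model of that pack/unpack (the shift and mask values below are assumptions for illustration, not taken from the header):

#include <stdint.h>
#include <stdio.h>

#define REC_MAP_SHIFT 16u
#define REC_MAP_MASK  0xffff0000u

int main(void)
{
        uint32_t map = 0, recycle = 0;

        recycle |= 1u << 0;     /* link 0 keeps its HPO DP encoder */
        recycle |= 1u << 2;     /* link 2 keeps its HPO DP encoder */
        map |= (recycle << REC_MAP_SHIFT) & REC_MAP_MASK;    /* save */

        /* restore path: invert the mask/shift to recover the flags */
        uint32_t restored = (map & REC_MAP_MASK) >> REC_MAP_SHIFT;
        printf("map=0x%08x restored=0x%x\n", map, restored);
        return 0;
}
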
index 4c3ab2575e4ba98d7a56ce75bfda44771b6945ea..cd9c31b5e55d395f0d4536edd1d8fefcb4bc0790 100644
@@ -27,6 +27,7 @@
 #include "dm_helpers.h"
 #include "opp.h"
 #include "dsc.h"
+#include "clk_mgr.h"
 #include "resource.h"
 
 #include "inc/core_types.h"
@@ -62,7 +63,6 @@ enum {
        POST_LT_ADJ_REQ_TIMEOUT = 200
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 struct dp_lt_fallback_entry {
        enum dc_lane_count lane_count;
        enum dc_link_rate link_rate;
@@ -97,16 +97,18 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
                {LANE_COUNT_ONE, LINK_RATE_HIGH},
                {LANE_COUNT_ONE, LINK_RATE_LOW},
 };
-#endif
+
+static const struct dc_link_settings fail_safe_link_settings = {
+               .lane_count = LANE_COUNT_ONE,
+               .link_rate = LINK_RATE_LOW,
+               .link_spread = LINK_SPREAD_DISABLED,
+};
 
 static bool decide_fallback_link_setting(
                struct dc_link *link,
                struct dc_link_settings initial_link_settings,
                struct dc_link_settings *current_link_setting,
                enum link_training_result training_result);
-static struct dc_link_settings get_common_supported_link_settings(
-               struct dc_link_settings link_setting_a,
-               struct dc_link_settings link_setting_b);
 static void maximize_lane_settings(const struct link_training_settings *lt_settings,
                struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
 static void override_lane_settings(const struct link_training_settings *lt_settings,
@@ -117,7 +119,7 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
 {
        union training_aux_rd_interval training_rd_interval;
        uint32_t wait_in_micro_secs = 100;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
        memset(&training_rd_interval, 0, sizeof(training_rd_interval));
        if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
                        link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
@@ -129,15 +131,7 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
                if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
                        wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
        }
-#else
-       core_link_read_dpcd(
-                       link,
-                       DP_TRAINING_AUX_RD_INTERVAL,
-                       (uint8_t *)&training_rd_interval,
-                       sizeof(training_rd_interval));
-       if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
-               wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;  
-#endif
+
        return wait_in_micro_secs;
 }
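
The retained logic decodes DP_TRAINING_AUX_RD_INTERVAL as: zero means the 100 us default, otherwise the field counts units of 4 ms. A tiny decode table makes the mapping concrete:

#include <stdint.h>
#include <stdio.h>

static uint32_t cr_wait_us(uint8_t interval_field)
{
        /* 0 -> 100 us default; N -> N x 4 ms between status reads */
        return interval_field ? interval_field * 4000u : 100u;
}

int main(void)
{
        for (uint8_t v = 0; v <= 4; v++)
                printf("field=%u -> %u us\n", v, cr_wait_us(v));
        return 0;       /* e.g. field=2 -> 8000 us (8 ms) */
}
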
 
@@ -145,7 +139,6 @@ static uint32_t get_eq_training_aux_rd_interval(
        struct dc_link *link,
        const struct dc_link_settings *link_settings)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        union training_aux_rd_interval training_rd_interval;
 
        memset(&training_rd_interval, 0, sizeof(training_rd_interval));
@@ -174,41 +167,16 @@ static uint32_t get_eq_training_aux_rd_interval(
        case 6: return 64000;
        default: return 400;
        }
-#else
-       union training_aux_rd_interval training_rd_interval;
-       uint32_t wait_in_micro_secs = 400;
-
-       memset(&training_rd_interval, 0, sizeof(training_rd_interval));
-       /* overwrite the delay if rev > 1.1*/
-       if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
-               /* DP 1.2 or later - retrieve delay through
-                * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
-               core_link_read_dpcd(
-                       link,
-                       DP_TRAINING_AUX_RD_INTERVAL,
-                       (uint8_t *)&training_rd_interval,
-                       sizeof(training_rd_interval));
-
-               if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
-                       wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
-       }
-
-       return wait_in_micro_secs;
-#endif
 }
 
 void dp_wait_for_training_aux_rd_interval(
        struct dc_link *link,
        uint32_t wait_in_micro_secs)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (wait_in_micro_secs > 1000)
                msleep(wait_in_micro_secs/1000);
        else
                udelay(wait_in_micro_secs);
-#else
-       udelay(wait_in_micro_secs);
-#endif
 
        DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
                __func__,
@@ -236,7 +204,6 @@ enum dpcd_training_patterns
        case DP_TRAINING_PATTERN_SEQUENCE_4:
                dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case DP_128b_132b_TPS1:
                dpcd_tr_pattern = DPCD_128b_132b_TPS1;
                break;
@@ -246,7 +213,6 @@ enum dpcd_training_patterns
        case DP_128b_132b_TPS2_CDS:
                dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
                break;
-#endif
        case DP_TRAINING_PATTERN_VIDEOIDLE:
                dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
                break;
@@ -289,10 +255,8 @@ static enum dc_dp_training_pattern decide_cr_training_pattern(
        case DP_8b_10b_ENCODING:
        default:
                return DP_TRAINING_PATTERN_SEQUENCE_1;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case DP_128b_132b_ENCODING:
                return DP_128b_132b_TPS1;
-#endif
        }
 }
 
@@ -300,19 +264,11 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
                const struct dc_link_settings *link_settings)
 {
        struct link_encoder *link_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct encoder_feature_support *enc_caps;
        struct dpcd_caps *rx_caps = &link->dpcd_caps;
        enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
 
-       /* Access link encoder capability based on whether it is statically
-        * or dynamically assigned to a link.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-       else
-               link_enc = link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
        enc_caps = &link_enc->features;
 
@@ -335,41 +291,8 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
                break;
        }
        return pattern;
-#else
-       enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
-       struct encoder_feature_support *features;
-       struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
-
-       /* Access link encoder capability based on whether it is statically
-        * or dynamically assigned to a link.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-       else
-               link_enc = link->link_enc;
-       ASSERT(link_enc);
-       features = &link_enc->features;
-
-       if (features->flags.bits.IS_TPS3_CAPABLE)
-               highest_tp = DP_TRAINING_PATTERN_SEQUENCE_3;
-
-       if (features->flags.bits.IS_TPS4_CAPABLE)
-               highest_tp = DP_TRAINING_PATTERN_SEQUENCE_4;
-
-       if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
-               highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_4)
-               return DP_TRAINING_PATTERN_SEQUENCE_4;
-
-       if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
-               highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_3)
-               return DP_TRAINING_PATTERN_SEQUENCE_3;
-
-       return DP_TRAINING_PATTERN_SEQUENCE_2;
-#endif
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
 {
        uint8_t link_rate = 0;
@@ -397,7 +320,6 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
 
        return link_rate;
 }
-#endif
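
get_dpcd_link_rate() is now compiled unconditionally because the UHBR rates need DPCD bandwidth codes of their own rather than the raw enum value. A hedged sketch of the mapping it performs (the values are the DPCD LINK_BW_SET codes from the DP spec; the helper below is illustrative and standalone):

	#include <stdint.h>

	static uint8_t dpcd_bw_code_for_rate(uint32_t rate_mbps)
	{
		switch (rate_mbps) {
		case 1620:  return 0x06; /* RBR,  DP_LINK_BW_1_62 */
		case 2700:  return 0x0a; /* HBR */
		case 5400:  return 0x14; /* HBR2 */
		case 8100:  return 0x1e; /* HBR3 */
		case 10000: return 0x01; /* UHBR10,   DP 2.0 code */
		case 13500: return 0x04; /* UHBR13.5, DP 2.0 code */
		case 20000: return 0x02; /* UHBR20,   DP 2.0 code */
		default:    return 0;    /* unknown: caller falls back */
		}
	}
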
 
 static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
 {
@@ -540,10 +462,8 @@ static void vendor_specific_lttpr_wa_four(
        const uint8_t offset = dp_convert_to_count(
                        link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
        uint32_t vendor_lttpr_write_address = 0xF004F;
-#if defined(CONFIG_DRM_AMD_DC_DP2_0)
        uint8_t sink_status = 0;
        uint8_t i;
-#endif
 
        if (offset != 0xFF)
                vendor_lttpr_write_address +=
@@ -569,7 +489,6 @@ static void vendor_specific_lttpr_wa_four(
                                sizeof(vendor_lttpr_write_data_two));
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DP2_0)
        /* poll for intra-hop disable */
        for (i = 0; i < 10; i++) {
                if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
@@ -577,7 +496,6 @@ static void vendor_specific_lttpr_wa_four(
                        break;
                udelay(1000);
        }
-#endif
 }
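
The intra-hop poll above is a bounded wait: up to ten 1 ms-spaced reads of DP_SINK_STATUS, exiting early once the intra-hop AUX reply indication clears. A generic standalone sketch of the pattern (callback names are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	/* Poll a status byte until (status & mask) == 0, at most 'tries'
	 * attempts; the caller supplies the register read and the delay. */
	static bool poll_until_clear(bool (*read_status)(uint8_t *status),
				     void (*delay_1ms)(void),
				     uint8_t mask, int tries)
	{
		uint8_t status;
		int i;

		for (i = 0; i < tries; i++) {
			if (read_status(&status) && !(status & mask))
				return true;
			delay_1ms();
		}
		return false;
	}
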
 
 static void vendor_specific_lttpr_wa_five(
@@ -665,11 +583,7 @@ enum dc_status dpcd_set_link_settings(
                status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
                                &lt_settings->link_settings.link_rate_set, 1);
        } else {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                rate = get_dpcd_link_rate(&lt_settings->link_settings);
-#else
-               rate = (uint8_t) (lt_settings->link_settings.link_rate);
-#endif
                if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
                                        (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
                                        link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
@@ -720,10 +634,8 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
                disable_scrabled_data_symbols = 1;
                break;
        case DP_TRAINING_PATTERN_SEQUENCE_4:
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case DP_128b_132b_TPS1:
        case DP_128b_132b_TPS2:
-#endif
                disable_scrabled_data_symbols = 0;
                break;
        default:
@@ -794,7 +706,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                size_in_bytes);
 
        if (is_repeater(link, offset)) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_128b_132b_ENCODING)
                        DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -805,7 +716,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                                        lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
                else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_8b_10b_ENCODING)
-#endif
                DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
                                " 0x%X VS set = %x PE set = %x max VS Reached = %x  max PE Reached = %x\n",
                        __func__,
@@ -816,7 +726,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                        lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
                        lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
        } else {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_128b_132b_ENCODING)
                        DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
@@ -825,14 +734,13 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                                        lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
                else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_8b_10b_ENCODING)
-#endif
-               DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
-                       __func__,
-                       dpcd_base_lt_offset,
-                       lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
-                       lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
-                       lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
-                       lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+                       DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
+                                       __func__,
+                                       dpcd_base_lt_offset,
+                                       lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+                                       lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+                                       lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+                                       lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
        }
        if (edp_workaround) {
                /* for eDP write in 2 parts because the 5-byte burst is
@@ -850,7 +758,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                        (uint8_t *)(lt_settings->dpcd_lane_settings),
                        size_in_bytes);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        } else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                        DP_128b_132b_ENCODING) {
                core_link_write_dpcd(
@@ -858,10 +765,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
                                dpcd_base_lt_offset,
                                dpcd_lt_buffer,
                                sizeof(dpcd_lt_buffer));
-#endif
-               } else
+       } else
                /* write it all in (1 + number-of-lanes)-byte burst*/
-                       core_link_write_dpcd(
+               core_link_write_dpcd(
                                link,
                                dpcd_base_lt_offset,
                                dpcd_lt_buffer,
@@ -928,13 +834,11 @@ void dp_hw_to_dpcd_lane_settings(
                                        (hw_lane_settings[lane].PRE_EMPHASIS ==
                                                        PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
                }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_128b_132b_ENCODING) {
                        dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
                                        hw_lane_settings[lane].FFE_PRESET.settings.level;
                }
-#endif
        }
 }
 
@@ -956,13 +860,11 @@ void dp_decide_lane_settings(
                                        (enum dc_pre_emphasis)(ln_adjust[lane].bits.
                                                        PRE_EMPHASIS_LANE);
                }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_128b_132b_ENCODING) {
                        hw_lane_settings[lane].FFE_PRESET.raw =
                                        ln_adjust[lane].tx_ffe.PRESET_VALUE;
                }
-#endif
        }
        dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
 
@@ -1013,9 +915,7 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti
 
        max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
        max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
-#endif
 
        /* Determine what the maximum of the requested settings are*/
        for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
@@ -1024,12 +924,10 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti
 
                if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
                        max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (lane_settings[lane].FFE_PRESET.settings.level >
                                max_requested.FFE_PRESET.settings.level)
                        max_requested.FFE_PRESET.settings.level =
                                        lane_settings[lane].FFE_PRESET.settings.level;
-#endif
        }
 
        /* make sure the requested settings are
@@ -1039,10 +937,8 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti
 
        if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
                max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
                max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
-#endif
 
        /* make sure the pre-emphasis matches the voltage swing*/
        if (max_requested.PRE_EMPHASIS >
@@ -1055,9 +951,7 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti
        for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
                lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
                lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
-#endif
        }
 }
 
@@ -1068,9 +962,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti
 
        if (lt_settings->voltage_swing == NULL &&
            lt_settings->pre_emphasis == NULL &&
-#if defined(CONFIG_DRM_AMD_DC_DCN)
            lt_settings->ffe_preset == NULL &&
-#endif
            lt_settings->post_cursor2 == NULL)
 
                return;
@@ -1082,10 +974,8 @@ static void override_lane_settings(const struct link_training_settings *lt_setti
                        lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
                if (lt_settings->post_cursor2)
                        lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (lt_settings->ffe_preset)
                        lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
-#endif
        }
 }
 
@@ -1189,7 +1079,6 @@ enum dc_status dpcd_set_lane_settings(
                link_training_setting->link_settings.lane_count);
 
        if (is_repeater(link, offset)) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
                                DP_128b_132b_ENCODING)
                        DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -1200,7 +1089,6 @@ enum dc_status dpcd_set_lane_settings(
                                        link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
                else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
                                DP_8b_10b_ENCODING)
-#endif
                DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
                                " 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
                        __func__,
@@ -1212,7 +1100,6 @@ enum dc_status dpcd_set_lane_settings(
                        link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
 
        } else {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
                                DP_128b_132b_ENCODING)
                        DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
@@ -1221,7 +1108,6 @@ enum dc_status dpcd_set_lane_settings(
                                        link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
                else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
                                DP_8b_10b_ENCODING)
-#endif
                DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x  PE set = %x max VS Reached = %x  max PE Reached = %x\n",
                        __func__,
                        lane0_set_address,
@@ -1357,14 +1243,12 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval
        case 0x04:
                aux_rd_interval_us = 16000;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case 0x05:
                aux_rd_interval_us = 32000;
                break;
        case 0x06:
                aux_rd_interval_us = 64000;
                break;
-#endif
        default:
                break;
        }
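
With the 0x05 and 0x06 cases compiled in unconditionally, the full translation implied by this switch is: code 0x01 -> 4000 us, 0x02 -> 8000 us, 0x03 -> 12000 us, 0x04 -> 16000 us, 0x05 -> 32000 us, 0x06 -> 64000 us; any other code keeps the function's initial default (400 us in the code preceding this hunk).
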
@@ -1405,13 +1289,8 @@ static enum link_training_result perform_channel_equalization_sequence(
        /* Note: also check that TPS4 is a supported feature*/
        tr_pattern = lt_settings->pattern_for_eq;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
                tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
-#else
-       if (is_repeater(link, offset))
-               tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
-#endif
 
        dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
 
@@ -1582,15 +1461,10 @@ static enum link_training_result perform_clock_recovery_sequence(
                        return LINK_TRAINING_SUCCESS;
 
                /* 6. max VS reached*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
                                DP_8b_10b_ENCODING) &&
                                dp_is_max_vs_reached(lt_settings))
                        break;
-#else
-               if (dp_is_max_vs_reached(lt_settings))
-                       break;
-#endif
 
                /* 7. same lane settings*/
                /* Note: settings are the same for all lanes,
@@ -1599,12 +1473,10 @@ static enum link_training_result perform_clock_recovery_sequence(
                                lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
                                                dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
                        retries_cr++;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
                                lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
                                                dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
                        retries_cr++;
-#endif
                else
                        retries_cr = 0;
 
@@ -1642,11 +1514,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
         * TPS4 must be used instead of POST_LT_ADJ_REQ.
         */
        if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
-#else
-                       lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) {
-#endif
                /* delay 5ms after Main Link output idle pattern and then check
                 * DPCD 0202h.
                 */
@@ -1745,7 +1613,6 @@ static inline void decide_8b_10b_training_settings(
        dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static inline void decide_128b_132b_training_settings(struct dc_link *link,
                const struct dc_link_settings *link_settings,
                struct link_training_settings *lt_settings)
@@ -1772,7 +1639,6 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link,
        dp_hw_to_dpcd_lane_settings(lt_settings,
                        lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
 }
-#endif
 
 void dp_decide_training_settings(
                struct dc_link *link,
@@ -1781,10 +1647,8 @@ void dp_decide_training_settings(
 {
        if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
                decide_8b_10b_training_settings(link, link_settings, lt_settings);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING)
                decide_128b_132b_training_settings(link, link_settings, lt_settings);
-#endif
 }
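
dp_get_link_encoding_format() keys off the link rate, so this dispatch amounts to: UHBR rates select 128b/132b channel coding, HBR3 and below select 8b/10b. A standalone sketch of that rule (enum and helper are illustrative):

	enum encoding { ENC_8B_10B, ENC_128B_132B };

	/* DP 2.0 UHBR rates (10, 13.5, 20 Gbps per lane) are 128b/132b
	 * coded; all earlier rates (RBR..HBR3) are 8b/10b coded. */
	static enum encoding encoding_for_rate_mbps(unsigned int rate_mbps)
	{
		return rate_mbps >= 10000 ? ENC_128B_132B : ENC_8B_10B;
	}
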
 
 static void override_training_settings(
@@ -1807,10 +1671,8 @@ static void override_training_settings(
                lt_settings->pre_emphasis = overrides->pre_emphasis;
        if (overrides->post_cursor2 != NULL)
                lt_settings->post_cursor2 = overrides->post_cursor2;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (overrides->ffe_preset != NULL)
                lt_settings->ffe_preset = overrides->ffe_preset;
-#endif
        /* Override HW lane settings with BIOS forced values if present */
        if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
                        link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
@@ -2014,7 +1876,6 @@ static void print_status_message(
        case LINK_RATE_HIGH3:
                link_rate = "HBR3";
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case LINK_RATE_UHBR10:
                link_rate = "UHBR10";
                break;
@@ -2024,7 +1885,6 @@ static void print_status_message(
        case LINK_RATE_UHBR20:
                link_rate = "UHBR20";
                break;
-#endif
        default:
                break;
        }
@@ -2054,7 +1914,6 @@ static void print_status_message(
        case LINK_TRAINING_LINK_LOSS:
                lt_result = "Link loss";
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case DP_128b_132b_LT_FAILED:
                lt_result = "LT_FAILED received";
                break;
@@ -2067,7 +1926,6 @@ static void print_status_message(
        case DP_128b_132b_CDS_DONE_TIMEOUT:
                lt_result = "CDS timeout";
                break;
-#endif
        default:
                break;
        }
@@ -2087,9 +1945,9 @@ static void print_status_message(
        }
 
        /* Connectivity log: link training */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
        /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
-#endif
+
        CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
                                link_rate,
                                lt_settings->link_settings.lane_count,
@@ -2177,15 +2035,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
 
 static void dpcd_exit_training_mode(struct dc_link *link)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        uint8_t sink_status = 0;
        uint8_t i;
-#endif
 
        /* clear training pattern set */
        dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* poll for intra-hop disable */
        for (i = 0; i < 10; i++) {
                if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
@@ -2193,7 +2048,6 @@ static void dpcd_exit_training_mode(struct dc_link *link)
                        break;
                udelay(1000);
        }
-#endif
 }
 
 enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
@@ -2217,7 +2071,6 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
        return status;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
                uint32_t *interval_in_us)
 {
@@ -2348,7 +2201,6 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
 
        return status;
 }
-#endif
 
 static enum link_training_result dp_perform_8b_10b_link_training(
                struct dc_link *link,
@@ -2398,18 +2250,17 @@ static enum link_training_result dp_perform_8b_10b_link_training(
 
        if (status == LINK_TRAINING_SUCCESS) {
                status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
-       if (status == LINK_TRAINING_SUCCESS) {
-               status = perform_channel_equalization_sequence(link,
-                                       link_res,
-                                       lt_settings,
-                                       DPRX);
+               if (status == LINK_TRAINING_SUCCESS) {
+                       status = perform_channel_equalization_sequence(link,
+                                                                      link_res,
+                                                                      lt_settings,
+                                                                      DPRX);
                }
        }
 
        return status;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static enum link_training_result dp_perform_128b_132b_link_training(
                struct dc_link *link,
                const struct link_resource *link_res,
@@ -2437,7 +2288,6 @@ static enum link_training_result dp_perform_128b_132b_link_training(
 
        return result;
 }
-#endif
 
 static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence(
        struct dc_link *link,
@@ -2514,11 +2364,7 @@ static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequenc
        core_link_write_dpcd(link, DP_LANE_COUNT_SET,
                &lane_count_set.raw, 1);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        rate = get_dpcd_link_rate(&lt_settings->link_settings);
-#else
-       rate = (uint8_t) (lt_settings->link_settings.link_rate);
-#endif
 
        /* Vendor specific: Toggle link rate */
        toggle_rate = (rate == 0x6) ? 0xA : 0x6;
@@ -2819,10 +2665,8 @@ enum link_training_result dc_link_dp_perform_link_training(
                status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
        else if (encoding == DP_8b_10b_ENCODING)
                status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        else if (encoding == DP_128b_132b_ENCODING)
                status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
-#endif
        else
                ASSERT(0);
 
@@ -2863,26 +2707,15 @@ bool perform_link_training_with_retries(
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->link;
        enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
-       struct link_encoder *link_enc;
        enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
        struct dc_link_settings current_setting = *link_setting;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
 
-       /* Dynamically assigned link encoders associated with stream rather than
-        * link.
-        */
-       if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
-       else
-               link_enc = link->link_enc;
-
-       /* We need to do this before the link training to ensure the idle pattern in SST
-        * mode will be sent right after the link training
-        */
-       if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING) {
-               link_enc->funcs->connect_dig_be_to_fe(link_enc,
-                                                       pipe_ctx->stream_res.stream_enc->id, true);
-               dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
-       }
+       if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING)
+               /* We need to do this before the link training to ensure the idle
+                * pattern in SST mode will be sent right after the link training
+                */
+               link_hwss->setup_stream_encoder(pipe_ctx);
 
        for (j = 0; j < attempts; ++j) {
 
@@ -3074,14 +2907,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
                dp_cs_id, link_settings);
 
        /* Set FEC enable */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
-#endif
                fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
                dp_set_fec_ready(link, NULL, fec_enable);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        }
-#endif
 
        if (lt_overrides->alternate_scrambler_reset) {
                if (*lt_overrides->alternate_scrambler_reset)
@@ -3124,13 +2953,9 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
         * Still shouldn't turn off dp_receiver (DPCD:600h)
         */
        if (link_down == true) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                struct dc_link_settings link_settings = link->cur_link_settings;
-#endif
                dp_disable_link_phy(link, NULL, link->connector_signal);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
-#endif
                        dp_set_fec_ready(link, NULL, false);
        }
 
@@ -3138,7 +2963,6 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
        return true;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
 {
        enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
@@ -3152,7 +2976,20 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
 
        return lttpr_max_link_rate;
 }
-#endif
+
+static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
+{
+       enum dc_link_rate cable_max_link_rate = LINK_RATE_HIGH3;
+
+       if (link->dpcd_caps.cable_attributes.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+               cable_max_link_rate = LINK_RATE_UHBR20;
+       else if (link->dpcd_caps.cable_attributes.bits.UHBR13_5_CAPABILITY)
+               cable_max_link_rate = LINK_RATE_UHBR13_5;
+       else if (link->dpcd_caps.cable_attributes.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
+               cable_max_link_rate = LINK_RATE_UHBR10;
+
+       return cable_max_link_rate;
+}
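
get_cable_max_link_rate() deliberately defaults to HBR3, so a cable that advertises none of the UHBR capability bits can never raise the cap. It feeds the clamp added to dp_get_max_link_cap() further down:

	cable_max_link_rate = get_cable_max_link_rate(link);
	if (!link->dc->debug.ignore_cable_id &&
			cable_max_link_rate < max_link_cap.link_rate)
		max_link_cap.link_rate = cable_max_link_rate;
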
 
 bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
 {
@@ -3163,16 +3000,7 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
                return false;
        }
 
-       /* Links supporting dynamically assigned link encoder will be assigned next
-        * available encoder if one not already assigned.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign) {
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-               if (link_enc == NULL)
-                       link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
-       } else
-               link_enc = link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
        if (link_enc && link_enc->funcs->get_max_link_cap) {
@@ -3186,37 +3014,21 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
        return false;
 }
 
-static struct dc_link_settings get_max_link_cap(struct dc_link *link,
-               const struct link_resource *link_res)
+
+struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
 {
        struct dc_link_settings max_link_cap = {0};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        enum dc_link_rate lttpr_max_link_rate;
-#endif
+       enum dc_link_rate cable_max_link_rate;
        struct link_encoder *link_enc = NULL;
 
-       /* Links supporting dynamically assigned link encoder will be assigned next
-        * available encoder if one not already assigned.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign) {
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-               if (link_enc == NULL)
-                       link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
-       } else
-               link_enc = link->link_enc;
+
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
        /* get max link encoder capability */
        if (link_enc)
                link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (max_link_cap.link_rate >= LINK_RATE_UHBR10) {
-               if (!link_res->hpo_dp_link_enc ||
-                               link->dc->debug.disable_uhbr)
-                       max_link_cap.link_rate = LINK_RATE_HIGH3;
-       }
-#endif
 
        /* Lower link settings based on sink's link cap */
        if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -3229,6 +3041,14 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link,
                        max_link_cap.link_spread)
                max_link_cap.link_spread =
                                link->reported_link_cap.link_spread;
+
+       /* Lower link settings based on cable attributes */
+       cable_max_link_rate = get_cable_max_link_rate(link);
+
+       if (!link->dc->debug.ignore_cable_id &&
+                       cable_max_link_rate < max_link_cap.link_rate)
+               max_link_cap.link_rate = cable_max_link_rate;
+
        /*
         * account for lttpr repeaters cap
         * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
@@ -3236,22 +3056,21 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link,
        if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {
                if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
                        max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                lttpr_max_link_rate = get_lttpr_max_link_rate(link);
 
                if (lttpr_max_link_rate < max_link_cap.link_rate)
                        max_link_cap.link_rate = lttpr_max_link_rate;
-#else
-               if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
-                       max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
-#endif
 
                DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR,  max_lane count %d max_link rate %d \n",
                                                __func__,
                                                max_link_cap.lane_count,
                                                max_link_cap.link_rate);
        }
+
+       if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
+                       link->dc->debug.disable_uhbr)
+               max_link_cap.link_rate = LINK_RATE_HIGH3;
+
        return max_link_cap;
 }
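
dp_get_max_link_cap() now intersects four limits in order: the link encoder's maximum, the sink's reported cap, the cable ID attributes, and the LTTPR caps, with the disable_uhbr debug kill switch applied last. A standalone sketch of the policy (types and names are illustrative, and the real function also intersects the LTTPR lane count):

	#include <stdint.h>

	struct cap { uint32_t lanes; uint32_t rate_mbps; };

	static uint32_t min_u32(uint32_t a, uint32_t b)
	{
		return a < b ? a : b;
	}

	/* Start from the encoder's maximum and lower it against every
	 * downstream limit; any single weak link caps the whole chain. */
	static struct cap intersect_caps(struct cap enc, struct cap sink,
					 uint32_t cable_rate_mbps,
					 uint32_t lttpr_rate_mbps)
	{
		struct cap out = enc;

		out.lanes = min_u32(out.lanes, sink.lanes);
		out.rate_mbps = min_u32(out.rate_mbps, sink.rate_mbps);
		out.rate_mbps = min_u32(out.rate_mbps, cable_rate_mbps);
		out.rate_mbps = min_u32(out.rate_mbps, lttpr_rate_mbps);
		return out;
	}
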
 
@@ -3370,48 +3189,22 @@ bool hpd_rx_irq_check_link_loss_status(
        return return_code;
 }
 
-bool dp_verify_link_cap(
+static bool dp_verify_link_cap(
        struct dc_link *link,
-       const struct link_resource *link_res,
        struct dc_link_settings *known_limit_link_setting,
        int *fail_count)
 {
-       struct dc_link_settings max_link_cap = {0};
-       struct dc_link_settings cur_link_setting = {0};
-       struct dc_link_settings *cur = &cur_link_setting;
-       struct dc_link_settings initial_link_settings = {0};
-       bool success;
-       bool skip_link_training;
+       struct dc_link_settings cur_link_settings = {0};
+       struct dc_link_settings initial_link_settings = *known_limit_link_setting;
+       bool success = false;
        bool skip_video_pattern;
-       enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
-       enum link_training_result status;
+       enum clock_source_id dp_cs_id = get_clock_source_id(link);
+       enum link_training_result status = LINK_TRAINING_SUCCESS;
        union hpd_irq_data irq_data;
-
-       /* link training starts with the maximum common settings
-        * supported by both sink and ASIC.
-        */
-       max_link_cap = get_max_link_cap(link, link_res);
-       initial_link_settings = get_common_supported_link_settings(
-                       *known_limit_link_setting,
-                       max_link_cap);
-
-       /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. */
-       if (link->dc->debug.skip_detection_link_training ||
-                       link->is_dig_mapping_flexible) {
-               /* TODO - should we check link encoder's max link caps here?
-                * How do we know which link encoder to check from?
-                */
-               link->verified_link_cap = *known_limit_link_setting;
-               return true;
-       } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign &&
-                       !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) {
-               link->verified_link_cap = initial_link_settings;
-               return true;
-       }
+       struct link_resource link_res;
 
        memset(&irq_data, 0, sizeof(irq_data));
-       success = false;
-       skip_link_training = false;
+       cur_link_settings = initial_link_settings;
 
        /* Grant extended timeout request */
        if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
@@ -3420,104 +3213,72 @@ bool dp_verify_link_cap(
                core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
-               reset_dp_hpo_stream_encoders_for_link(link);
-#endif
-       /* TODO implement override and monitor patch later */
-
-       /* try to train the link from high to low to
-        * find the physical link capability
-        */
-       /* disable PHY done possible by BIOS, will be done by driver itself */
-       dp_disable_link_phy(link, link_res, link->connector_signal);
-
-       dp_cs_id = get_clock_source_id(link);
-
-       cur_link_setting = initial_link_settings;
-
-       /* Temporary Renoir-specific workaround for SWDEV-215184;
-        * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
-        * so add extra cycle of enabling and disabling the PHY before first link training.
-        */
-       if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
-                       link->dc->debug.usbc_combo_phy_reset_wa) {
-               dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur);
-               dp_disable_link_phy(link, link_res, link->connector_signal);
-       }
-
        do {
-               skip_video_pattern = true;
-
-               if (cur->link_rate == LINK_RATE_LOW)
-                       skip_video_pattern = false;
+               if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings))
+                       continue;
 
+               skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW;
                dp_enable_link_phy(
                                link,
-                               link_res,
+                               &link_res,
                                link->connector_signal,
                                dp_cs_id,
-                               cur);
+                               &cur_link_settings);
 
+               status = dc_link_dp_perform_link_training(
+                               link,
+                               &link_res,
+                               &cur_link_settings,
+                               skip_video_pattern);
 
-               if (skip_link_training)
+               if (status == LINK_TRAINING_SUCCESS) {
                        success = true;
-               else {
-                       status = dc_link_dp_perform_link_training(
+                       udelay(1000);
+                       if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
+                                       hpd_rx_irq_check_link_loss_status(
                                                        link,
-                                                       link_res,
-                                                       cur,
-                                                       skip_video_pattern);
-                       if (status == LINK_TRAINING_SUCCESS)
-                               success = true;
-                       else
+                                                       &irq_data))
                                (*fail_count)++;
-               }
 
-               if (success) {
-                       link->verified_link_cap = *cur;
-                       udelay(1000);
-                       if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK)
-                               if (hpd_rx_irq_check_link_loss_status(
-                                               link,
-                                               &irq_data))
-                                       (*fail_count)++;
+               } else {
+                       (*fail_count)++;
                }
-               /* always disable the link before trying another
-                * setting or before returning we'll enable it later
-                * based on the actual mode we're driving
-                */
-               dp_disable_link_phy(link, link_res, link->connector_signal);
+               dp_disable_link_phy(link, &link_res, link->connector_signal);
        } while (!success && decide_fallback_link_setting(link,
-                       initial_link_settings, cur, status));
-
-       /* Link Training failed for all Link Settings
-        *  (Lane Count is still unknown)
-        */
-       if (!success) {
-               /* If all LT fails for all settings,
-                * set verified = failed safe (1 lane low)
-                */
-               link->verified_link_cap.lane_count = LANE_COUNT_ONE;
-               link->verified_link_cap.link_rate = LINK_RATE_LOW;
+                       initial_link_settings, &cur_link_settings, status));
 
-               link->verified_link_cap.link_spread =
-               LINK_SPREAD_DISABLED;
-       }
+       link->verified_link_cap = success ?
+                       cur_link_settings : fail_safe_link_settings;
+       return success;
+}
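
On total failure the rewritten dp_verify_link_cap() stores fail_safe_link_settings, which matches the inline fallback it replaces (one lane at RBR, downspread disabled); assuming it is defined elsewhere in this series, roughly as:

	/* illustrative reconstruction of the fail-safe constant */
	static const struct dc_link_settings fail_safe_link_settings = {
		.lane_count = LANE_COUNT_ONE,
		.link_rate = LINK_RATE_LOW,
		.link_spread = LINK_SPREAD_DISABLED,
	};
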
 
+static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
+               struct dc_link_settings *link_settings)
+{
+       /* Temporary Renoir-specific workaround: the PHY will sometimes be in a
+        * bad state on hotplugging a display from certain USB-C dongles, so add
+        * an extra cycle of enabling and disabling the PHY before the first
+        * link training.
+        */
+       struct link_resource link_res = {0};
+       enum clock_source_id dp_cs_id = get_clock_source_id(link);
 
-       return success;
+       dp_enable_link_phy(link, &link_res, link->connector_signal,
+                       dp_cs_id, link_settings);
+       dp_disable_link_phy(link, &link_res, link->connector_signal);
 }
 
 bool dp_verify_link_cap_with_retries(
        struct dc_link *link,
-       const struct link_resource *link_res,
        struct dc_link_settings *known_limit_link_setting,
        int attempts)
 {
        int i = 0;
        bool success = false;
 
+       if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
+                       link->dc->debug.usbc_combo_phy_reset_wa)
+               apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
+
        for (i = 0; i < attempts; i++) {
                int fail_count = 0;
                enum dc_connection_type type = dc_connection_none;
@@ -3525,12 +3286,9 @@ bool dp_verify_link_cap_with_retries(
                memset(&link->verified_link_cap, 0,
                                sizeof(struct dc_link_settings));
                if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
-                       link->verified_link_cap.lane_count = LANE_COUNT_ONE;
-                       link->verified_link_cap.link_rate = LINK_RATE_LOW;
-                       link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
+                       link->verified_link_cap = fail_safe_link_settings;
                        break;
-               } else if (dp_verify_link_cap(link, link_res,
-                               known_limit_link_setting,
+               } else if (dp_verify_link_cap(link, known_limit_link_setting,
                                &fail_count) && fail_count == 0) {
                        success = true;
                        break;
@@ -3540,79 +3298,32 @@ bool dp_verify_link_cap_with_retries(
        return success;
 }
 
-bool dp_verify_mst_link_cap(
-       struct dc_link *link, const struct link_resource *link_res)
-{
-       struct dc_link_settings max_link_cap = {0};
-
-       if (dp_get_link_encoding_format(&link->reported_link_cap) ==
-                       DP_8b_10b_ENCODING) {
-               max_link_cap = get_max_link_cap(link, link_res);
-               link->verified_link_cap = get_common_supported_link_settings(
-                               link->reported_link_cap,
-                               max_link_cap);
-       }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
-                       DP_128b_132b_ENCODING) {
-               dp_verify_link_cap_with_retries(link,
-                               link_res,
-                               &link->reported_link_cap,
-                               LINK_TRAINING_MAX_VERIFY_RETRY);
-       }
-#endif
-       return true;
-}
-
-static struct dc_link_settings get_common_supported_link_settings(
-               struct dc_link_settings link_setting_a,
-               struct dc_link_settings link_setting_b)
+/* In DP compliance testing, a DPR-120 may report
+ * a random value in its MAX_LINK_BW dpcd field.
+ * We map it to the maximum supported link rate that
+ * is not larger than MAX_LINK_BW in this case.
+ */
+static enum dc_link_rate get_link_rate_from_max_link_bw(
+                uint8_t max_link_bw)
 {
-       struct dc_link_settings link_settings = {0};
+       enum dc_link_rate link_rate;
 
-       link_settings.lane_count =
-               (link_setting_a.lane_count <=
-                       link_setting_b.lane_count) ?
-                       link_setting_a.lane_count :
-                       link_setting_b.lane_count;
-       link_settings.link_rate =
-               (link_setting_a.link_rate <=
-                       link_setting_b.link_rate) ?
-                       link_setting_a.link_rate :
-                       link_setting_b.link_rate;
-       link_settings.link_spread = LINK_SPREAD_DISABLED;
-
-       /* in DP compliance test, DPR-120 may have
-        * a random value in its MAX_LINK_BW dpcd field.
-        * We map it to the maximum supported link rate that
-        * is smaller than MAX_LINK_BW in this case.
-        */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (link_settings.link_rate > LINK_RATE_UHBR20) {
-               link_settings.link_rate = LINK_RATE_UHBR20;
-       } else if (link_settings.link_rate < LINK_RATE_UHBR20 &&
-                       link_settings.link_rate > LINK_RATE_UHBR13_5) {
-               link_settings.link_rate = LINK_RATE_UHBR13_5;
-       } else if (link_settings.link_rate < LINK_RATE_UHBR10 &&
-                       link_settings.link_rate > LINK_RATE_HIGH3) {
-#else
-       if (link_settings.link_rate > LINK_RATE_HIGH3) {
-#endif
-               link_settings.link_rate = LINK_RATE_HIGH3;
-       } else if (link_settings.link_rate < LINK_RATE_HIGH3
-                       && link_settings.link_rate > LINK_RATE_HIGH2) {
-               link_settings.link_rate = LINK_RATE_HIGH2;
-       } else if (link_settings.link_rate < LINK_RATE_HIGH2
-                       && link_settings.link_rate > LINK_RATE_HIGH) {
-               link_settings.link_rate = LINK_RATE_HIGH;
-       } else if (link_settings.link_rate < LINK_RATE_HIGH
-                       && link_settings.link_rate > LINK_RATE_LOW) {
-               link_settings.link_rate = LINK_RATE_LOW;
-       } else if (link_settings.link_rate < LINK_RATE_LOW) {
-               link_settings.link_rate = LINK_RATE_UNKNOWN;
+       if (max_link_bw >= LINK_RATE_HIGH3) {
+               link_rate = LINK_RATE_HIGH3;
+       } else if (max_link_bw < LINK_RATE_HIGH3
+                       && max_link_bw >= LINK_RATE_HIGH2) {
+               link_rate = LINK_RATE_HIGH2;
+       } else if (max_link_bw < LINK_RATE_HIGH2
+                       && max_link_bw >= LINK_RATE_HIGH) {
+               link_rate = LINK_RATE_HIGH;
+       } else if (max_link_bw < LINK_RATE_HIGH
+                       && max_link_bw >= LINK_RATE_LOW) {
+               link_rate = LINK_RATE_LOW;
+       } else {
+               link_rate = LINK_RATE_UNKNOWN;
        }
 
-       return link_settings;
+       return link_rate;
 }
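
Because enum dc_link_rate reuses the raw DPCD bandwidth codes for the 8b/10b rates, the comparisons above work directly on the register value. Worked example: a DPR-120 reporting MAX_LINK_BW = 0x10 (not a defined code) satisfies 0x0A <= 0x10 < 0x14 and is therefore mapped down to LINK_RATE_HIGH (HBR, 2.7 Gbps per lane).
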
 
 static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
@@ -3642,14 +3353,12 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
 static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
 {
        switch (link_rate) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case LINK_RATE_UHBR20:
                return LINK_RATE_UHBR13_5;
        case LINK_RATE_UHBR13_5:
                return LINK_RATE_UHBR10;
        case LINK_RATE_UHBR10:
                return LINK_RATE_HIGH3;
-#endif
        case LINK_RATE_HIGH3:
                return LINK_RATE_HIGH2;
        case LINK_RATE_HIGH2:
@@ -3684,20 +3393,17 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
                return LINK_RATE_HIGH2;
        case LINK_RATE_HIGH2:
                return LINK_RATE_HIGH3;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case LINK_RATE_HIGH3:
                return LINK_RATE_UHBR10;
        case LINK_RATE_UHBR10:
                return LINK_RATE_UHBR13_5;
        case LINK_RATE_UHBR13_5:
                return LINK_RATE_UHBR20;
-#endif
        default:
                return LINK_RATE_UNKNOWN;
        }
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static bool decide_fallback_link_setting_max_bw_policy(
                const struct dc_link_settings *max,
                struct dc_link_settings *cur)
@@ -3731,7 +3437,6 @@ static bool decide_fallback_link_setting_max_bw_policy(
 
        return found;
 }
-#endif
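
decide_fallback_link_setting_max_bw_policy() is now reachable for 8b/10b links as well, via the force_dp2_lt_fallback_method debug flag used just below. The idea is to rank every (rate, lane-count) candidate by total link bandwidth and drop to the next-highest entry on failure, rather than reducing one axis at a time. A standalone sketch of the ordering metric (names are illustrative):

	#include <stdint.h>

	struct lt_candidate { uint32_t rate_mbps; uint32_t lanes; };

	/* Candidates are ranked by total bandwidth; fallback walks the
	 * list from the current entry to the next-lower-ranked one. */
	static uint64_t total_link_bw_mbps(const struct lt_candidate *c)
	{
		return (uint64_t)c->rate_mbps * c->lanes;
	}
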
 
 /*
  * function: set link rate and lane count fallback based
@@ -3749,12 +3454,10 @@ static bool decide_fallback_link_setting(
 {
        if (!current_link_setting)
                return false;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING ||
                        link->dc->debug.force_dp2_lt_fallback_method)
                return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
                                current_link_setting);
-#endif
 
        switch (training_result) {
        case LINK_TRAINING_CR_FAIL_LANE0:
@@ -3801,6 +3504,7 @@ static bool decide_fallback_link_setting(
                        current_link_setting->link_rate =
                                reduce_link_rate(
                                        current_link_setting->link_rate);
+                       current_link_setting->lane_count = initial_link_settings.lane_count;
                } else {
                        return false;
                }
@@ -3813,6 +3517,7 @@ static bool decide_fallback_link_setting(
                        current_link_setting->link_rate =
                                reduce_link_rate(
                                        current_link_setting->link_rate);
+                       current_link_setting->lane_count = initial_link_settings.lane_count;
                } else {
                        return false;
                }
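
The two added lane_count assignments change the shape of the fallback walk: once lane reduction bottoms out and the rate is stepped down, the lane count is restored to the initial full width. For example, starting at 4x HBR3, repeated CR failures now progress 4x HBR3 -> 2x HBR3 -> 1x HBR3 -> 4x HBR2, instead of staying at one lane for every lower rate.
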
@@ -4292,15 +3997,9 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
        union phy_test_pattern dpcd_test_pattern;
        union lane_adjust dpcd_lane_adjustment[2];
        unsigned char dpcd_post_cursor_2_adjustment = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        unsigned char test_pattern_buffer[
                        (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
                        DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
-#else
-       unsigned char test_pattern_buffer[
-                       (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
-                       DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
-#endif
        unsigned int test_pattern_size = 0;
        enum dp_test_pattern test_pattern;
        union lane_adjust dpcd_lane_adjust;
@@ -4371,7 +4070,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
        case PHY_TEST_PATTERN_CP2520_3:
                test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case PHY_TEST_PATTERN_128b_132b_TPS1:
                test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
                break;
@@ -4399,7 +4097,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
        case PHY_TEST_PATTERN_SQUARE_PULSE:
                test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
                break;
-#endif
        default:
                test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
        break;
@@ -4415,7 +4112,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
                                test_pattern_size);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
                test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
                core_link_read_dpcd(
@@ -4434,7 +4130,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
                                test_pattern_buffer,
                                test_pattern_size);
        }
-#endif
 
        /* prepare link training settings */
        link_training_settings.link_settings = link->cur_link_settings;
@@ -4455,14 +4150,11 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
                        link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
                                (enum dc_post_cursor2)
                                ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
-               }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-               else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+               } else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
                                DP_128b_132b_ENCODING) {
                        link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
                                        dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
                }
-#endif
        }
 
        dp_hw_to_dpcd_lane_settings(&link_training_settings,
@@ -4970,7 +4662,7 @@ uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
        return 0;
 }
 
-/**
+/*
  * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw.
  */
 static uint32_t intersect_frl_link_bw_support(
@@ -5159,7 +4851,6 @@ static void get_active_converter_info(
                        dp_hw_fw_revision.ieee_fw_rev,
                        sizeof(dp_hw_fw_revision.ieee_fw_rev));
        }
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
                        link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
                union dp_dfp_cap_ext dfp_cap_ext;
@@ -5195,7 +4886,6 @@ static void get_active_converter_info(
                DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
                DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
        }
-#endif
 }
 
 static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
@@ -5255,12 +4945,8 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 
 bool dp_retrieve_lttpr_cap(struct dc_link *link)
 {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        uint8_t lttpr_dpcd_data[8];
        bool allow_lttpr_non_transparent_mode = 0;
-#else
-       uint8_t lttpr_dpcd_data[6];
-#endif
        bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
        bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
        enum dc_status status = DC_ERROR_UNEXPECTED;
@@ -5268,7 +4954,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
 
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
                        link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
                allow_lttpr_non_transparent_mode = 1;
@@ -5276,7 +4961,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
                        !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
                allow_lttpr_non_transparent_mode = 1;
        }
-#endif
 
        /*
         * Logic to determine LTTPR mode
@@ -5285,21 +4969,12 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
        if (vbios_lttpr_enable && vbios_lttpr_interop)
                link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
        else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (allow_lttpr_non_transparent_mode)
-#else
-               if (link->dc->config.allow_lttpr_non_transparent_mode)
-#endif
                        link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
                else
                        link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
        } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
-#else
-               if (!link->dc->config.allow_lttpr_non_transparent_mode
-                       || !link->dc->caps.extended_aux_timeout_support)
-#endif
                        link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
                else
                        link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
@@ -5349,7 +5024,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
                                lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
                                                                DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
                                lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
                                                                DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
@@ -5357,12 +5031,10 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
                link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
                                lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES -
                                                                DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-#endif
 
                /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
-               is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+               is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
                                link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
-                               link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
                                link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
                                link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
                if (is_lttpr_present) {
@@ -5374,6 +5046,13 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
        return is_lttpr_present;
 }
 
+static bool is_usbc_connector(struct dc_link *link)
+{
+       return link->link_enc &&
+                       link->link_enc->features.flags.bits.DP_IS_USB_C;
+}
+
 static bool retrieve_link_cap(struct dc_link *link)
 {
        /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5430,6 +5109,9 @@ static bool retrieve_link_cap(struct dc_link *link)
         */
        msleep(post_oui_delay);
 
+       /* Read cable ID and update receiver */
+       dpcd_update_cable_id(link);
+
        for (i = 0; i < read_dpcd_retry_cnt; i++) {
                status = core_link_read_dpcd(
                                link,
@@ -5516,6 +5198,9 @@ static bool retrieve_link_cap(struct dc_link *link)
 
        read_dp_device_vendor_id(link);
 
+       /* TODO - decouple raw mst capability from policy decision */
+       link->dpcd_caps.is_mst_capable = is_mst_supported(link);
+
        get_active_converter_info(ds_port.byte, link);
 
        dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -5534,8 +5219,8 @@ static bool retrieve_link_cap(struct dc_link *link)
 
        link->reported_link_cap.lane_count =
                link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
-       link->reported_link_cap.link_rate = dpcd_data[
-               DP_MAX_LINK_RATE - DP_DPCD_REV];
+       link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw(
+                       dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]);
        link->reported_link_cap.link_spread =
                link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
                LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
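The body of get_link_rate_from_max_link_bw() is not part of this diff; the point of the change is that the raw DPCD byte is now validated and translated instead of being stored directly. A plausible sketch, assuming the standard DPCD encoding in which MAX_LINK_RATE gives the per-lane rate in units of 0.27 Gbps (the enum names are the dc ones used elsewhere in this file):

    static enum dc_link_rate get_link_rate_from_max_link_bw(uint8_t max_link_bw)
    {
            switch (max_link_bw) {
            case 0x06: return LINK_RATE_LOW;    /* RBR,  1.62 Gbps */
            case 0x0a: return LINK_RATE_HIGH;   /* HBR,  2.70 Gbps */
            case 0x14: return LINK_RATE_HIGH2;  /* HBR2, 5.40 Gbps */
            case 0x1e: return LINK_RATE_HIGH3;  /* HBR3, 8.10 Gbps */
            default:   return LINK_RATE_UNKNOWN;
            }
    }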
@@ -5597,6 +5282,26 @@ static bool retrieve_link_cap(struct dc_link *link)
                dp_hw_fw_revision.ieee_fw_rev,
                sizeof(dp_hw_fw_revision.ieee_fw_rev));
 
+       /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
+       {
+               uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+               uint8_t fwrev_mbp_2018[] = { 7, 4 };
+               uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+               /* We also check the firmware revision, as the 16,1 models have an
+                * identical device id and would otherwise be quirked incorrectly.
+                */
+               if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+                   !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+                            sizeof(str_mbp_2018)) &&
+                   (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+                            sizeof(fwrev_mbp_2018)) ||
+                   !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+                            sizeof(fwrev_mbp_2018_vega)))) {
+                       link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+               }
+       }
+
        memset(&link->dpcd_caps.dsc_caps, '\0',
                        sizeof(link->dpcd_caps.dsc_caps));
        memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -5612,7 +5317,6 @@ static bool retrieve_link_cap(struct dc_link *link)
                                DP_DSC_SUPPORT,
                                link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
                                sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
                        status = core_link_read_dpcd(
                                        link,
@@ -5627,19 +5331,33 @@ static bool retrieve_link_cap(struct dc_link *link)
                        DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
                                        link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
                }
-#else
-               status = core_link_read_dpcd(
-                               link,
-                               DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
-                               link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
-                               sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+
+               /* Apply workaround to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode
+                * only if required.
+                */
+               if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+                               !link->dc->debug.dpia_debug.bits.disable_force_tbt3_work_around &&
 #endif
+                               link->dpcd_caps.is_branch_dev &&
+                               link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                               link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
+                               (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
+                               link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) {
+                       /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA.
+                        * Clear FEC and DSC capabilities as a workaround if that is not the case.
+                        */
+                       link->wa_flags.dpia_forced_tbt3_mode = true;
+                       memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps));
+                       memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
+                       DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index);
+               } else
+                       link->wa_flags.dpia_forced_tbt3_mode = false;
        }
 
        if (!dpcd_read_sink_ext_caps(link))
                link->dpcd_sink_ext_caps.raw = 0;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV];
 
        if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
@@ -5686,7 +5404,6 @@ static bool retrieve_link_cap(struct dc_link *link)
                if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
                        DC_LOG_DP2("\tFEC aggregated error counters are supported");
        }
-#endif
 
        /* Connectivity log: detection */
        CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -5759,14 +5476,6 @@ bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
 bool detect_dp_sink_caps(struct dc_link *link)
 {
        return retrieve_link_cap(link);
-
-       /* dc init_hw has power encoder using default
-        * signal for connector. For native DP, no
-        * need to power up encoder again. If not native
-        * DP, hw_init may need check signal or power up
-        * encoder here.
-        */
-       /* TODO save sink caps in link->sink */
 }
 
 static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
@@ -5844,8 +5553,6 @@ void detect_edp_sink_caps(struct dc_link *link)
                        }
                }
        }
-       link->verified_link_cap = link->reported_link_cap;
-
        core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
                                                &backlight_adj_cap, sizeof(backlight_adj_cap));
 
@@ -6011,7 +5718,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
                else if (link->dc->hwss.set_disp_pattern_generator) {
                        struct pipe_ctx *odm_pipe;
                        int opp_cnt = 1;
-                       int dpg_width = width;
+                       int dpg_width;
 
                        for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
                                opp_cnt++;
@@ -6168,7 +5875,6 @@ bool dc_link_dp_set_test_pattern(
                case DP_TEST_PATTERN_CP2520_3:
                        pattern = PHY_TEST_PATTERN_CP2520_3;
                        break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                case DP_TEST_PATTERN_128b_132b_TPS1:
                        pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
                        break;
@@ -6196,7 +5902,6 @@ bool dc_link_dp_set_test_pattern(
                case DP_TEST_PATTERN_SQUARE_PULSE:
                        pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
                        break;
-#endif
                default:
                        return false;
                }
@@ -6462,14 +6167,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
        enum dc_status status = DC_OK;
        uint8_t fec_config = 0;
 
-       /* Access link encoder based on whether it is statically
-        * or dynamically assigned to a link.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-       else
-               link_enc = link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
        if (!dc_link_should_enable_fec(link))
@@ -6509,14 +6207,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 {
        struct link_encoder *link_enc = NULL;
 
-       /* Access link encoder based on whether it is statically
-        * or dynamically assigned to a link.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-       else
-               link_enc = link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
        if (!dc_link_should_enable_fec(link))
@@ -6542,23 +6233,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
        }
 }
 
-struct link_encoder *dp_get_link_enc(struct dc_link *link)
-{
-       struct link_encoder *link_enc;
-
-       link_enc = link->link_enc;
-       if (link->is_dig_mapping_flexible &&
-           link->dc->res_pool->funcs->link_encs_assign) {
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc,
-                                                                 link);
-               if (!link->link_enc)
-                       link_enc = link_enc_cfg_get_next_avail_link_enc(
-                               link->ctx->dc);
-       }
-
-       return link_enc;
-}
-
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
        if (!link->dc->vendor_signature.is_valid) {
@@ -6635,6 +6309,31 @@ void dpcd_set_source_specific_data(struct dc_link *link)
        }
 }
 
+void dpcd_update_cable_id(struct dc_link *link)
+{
+       struct link_encoder *link_enc = NULL;
+
+       link_enc = link_enc_cfg_get_link_enc(link);
+
+       if (!link_enc ||
+                       !link_enc->features.flags.bits.IS_UHBR10_CAPABLE ||
+                       link->dprx_status.cable_id_updated)
+               return;
+
+       /* Retrieve cable attributes */
+       if (!is_usbc_connector(link))
+               core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
+                               &link->dpcd_caps.cable_attributes.raw,
+                               sizeof(uint8_t));
+
+       /* Update receiver with cable attributes */
+       core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX,
+                       &link->dpcd_caps.cable_attributes.raw,
+                       sizeof(link->dpcd_caps.cable_attributes.raw));
+
+       link->dprx_status.cable_id_updated = 1;
+}
+
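Note on the new helper: dpcd_update_cable_id() propagates cable capabilities once per detection for UHBR10-capable encoders. It reads the attributes the receiver reported via DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX (skipped for USB-C connectors, where, judging by the is_usbc_connector() guard, the cable ID is expected to arrive by another path), writes them back through DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, and latches cable_id_updated so the exchange is not repeated.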
 bool dc_link_set_backlight_level_nits(struct dc_link *link,
                bool isHDR,
                uint32_t backlight_millinits,
@@ -6799,15 +6498,12 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings
        if ((link_settings->link_rate >= LINK_RATE_LOW) &&
                        (link_settings->link_rate <= LINK_RATE_HIGH3))
                return DP_8b_10b_ENCODING;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
                        (link_settings->link_rate <= LINK_RATE_UHBR20))
                return DP_128b_132b_ENCODING;
-#endif
        return DP_UNKNOWN_ENCODING;
 }
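For reference, the two branches differ in payload efficiency: 8b/10b encoding carries 80% of the raw lane rate (e.g. HBR3: 8.1 Gbps x 0.80 = 6.48 Gbps per lane), while 128b/132b carries 128/132 ≈ 96.97% (e.g. UHBR10: 10 Gbps x 128/132 ≈ 9.70 Gbps per lane), which is why the UHBR rates are handled as a separate encoding.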
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
 {
        struct dc_link_settings link_settings = {0};
@@ -7024,7 +6720,6 @@ bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->link_res.hpo_dp_link_enc &&
                        dc_is_dp_signal(pipe_ctx->stream->signal));
 }
-#endif
 
 void edp_panel_backlight_power_on(struct dc_link *link)
 {
@@ -7036,3 +6731,621 @@ void edp_panel_backlight_power_on(struct dc_link *link)
        if (link->dc->hwss.edp_backlight_control)
                link->dc->hwss.edp_backlight_control(link, true);
 }
+
+void dc_link_dp_clear_rx_status(struct dc_link *link)
+{
+       memset(&link->dprx_status, 0, sizeof(link->dprx_status));
+}
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+       uint8_t state;
+
+       state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+       if (link->sync_lt_in_progress)
+               return;
+
+       core_link_write_dpcd(link, DP_SET_POWER, &state,
+                                                sizeof(state));
+}
+
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+{
+       if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
+               core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+                                       &dp_test_mode, sizeof(dp_test_mode));
+}
+
+static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
+{
+       switch (lttpr_repeater_count) {
+       case 0x80: // 1 lttpr repeater
+               return 1;
+       case 0x40: // 2 lttpr repeaters
+               return 2;
+       case 0x20: // 3 lttpr repeaters
+               return 3;
+       case 0x10: // 4 lttpr repeaters
+               return 4;
+       case 0x08: // 5 lttpr repeaters
+               return 5;
+       case 0x04: // 6 lttpr repeaters
+               return 6;
+       case 0x02: // 7 lttpr repeaters
+               return 7;
+       case 0x01: // 8 lttpr repeaters
+               return 8;
+       default:
+               break;
+       }
+       return 0; // invalid value
+}
+
+static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
+{
+       return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
+}
+
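Since the DPCD repeater-count field decoded by convert_to_count() above is one-hot (0x80 means 1 repeater, down to 0x01 meaning 8), the lookup table is equivalent to a bit-position computation. A minimal sketch, not part of the patch, assuming a GCC-style builtin:

    static uint8_t convert_to_count_by_bit(uint8_t lttpr_repeater_count)
    {
            /* Reject zero and non-one-hot (multi-bit) values. */
            if (lttpr_repeater_count == 0 ||
                (lttpr_repeater_count & (lttpr_repeater_count - 1)))
                    return 0;
            /* 0x80 -> 1, 0x40 -> 2, ..., 0x01 -> 8 */
            return 8 - __builtin_ctz(lttpr_repeater_count);
    }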
+void dp_enable_link_phy(
+       struct dc_link *link,
+       const struct link_resource *link_res,
+       enum signal_type signal,
+       enum clock_source_id clock_source,
+       const struct dc_link_settings *link_settings)
+{
+       struct dc  *dc = link->ctx->dc;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       struct pipe_ctx *pipes =
+                       link->dc->current_state->res_ctx.pipe_ctx;
+       struct clock_source *dp_cs =
+                       link->dc->res_pool->dp_clock_source;
+       const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+       unsigned int i;
+
+       if (link->connector_signal == SIGNAL_TYPE_EDP) {
+               link->dc->hwss.edp_power_control(link, true);
+               link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+       }
+
+       /* If the current pixel clock source is not DTO (happens after
+        * switching from HDMI passive dongle to DP on the same connector),
+        * switch the pixel clock source to DTO.
+        */
+       for (i = 0; i < MAX_PIPES; i++) {
+               if (pipes[i].stream != NULL &&
+                       pipes[i].stream->link == link) {
+                       if (pipes[i].clock_source != NULL &&
+                                       pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+                               pipes[i].clock_source = dp_cs;
+                               pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
+                                               pipes[i].stream->timing.pix_clk_100hz;
+                               pipes[i].clock_source->funcs->program_pix_clk(
+                                                       pipes[i].clock_source,
+                                                       &pipes[i].stream_res.pix_clk_params,
+                                                       &pipes[i].pll_settings);
+                       }
+               }
+       }
+
+       link->cur_link_settings = *link_settings;
+
+       if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+               if (dc->clk_mgr->funcs->notify_link_rate_change)
+                       dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+       }
+
+       if (dmcu != NULL && dmcu->funcs->lock_phy)
+               dmcu->funcs->lock_phy(dmcu);
+
+       if (link_hwss->ext.enable_dp_link_output)
+               link_hwss->ext.enable_dp_link_output(link, link_res, signal,
+                               clock_source, link_settings);
+
+       if (dmcu != NULL && dmcu->funcs->unlock_phy)
+               dmcu->funcs->unlock_phy(dmcu);
+
+       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+       dp_receiver_power_ctrl(link, true);
+}
+
+void edp_add_delay_for_T9(struct dc_link *link)
+{
+       if (link->local_sink &&
+                       link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
+               udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
+}
+
+bool edp_receiver_ready_T9(struct dc_link *link)
+{
+       unsigned int tries = 0;
+       unsigned char sinkstatus = 0;
+       unsigned char edpRev = 0;
+       enum dc_status result = DC_OK;
+
+       result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+
+       /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+       if (result == DC_OK && edpRev >= DP_EDP_12) {
+               do {
+                       sinkstatus = 1;
+                       result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+                       if (sinkstatus == 0)
+                               break;
+                       if (result != DC_OK)
+                               break;
+                       udelay(100); // Max T9
+               } while (++tries < 50);
+       }
+
+       return result;
+}
+
+bool edp_receiver_ready_T7(struct dc_link *link)
+{
+       unsigned char sinkstatus = 0;
+       unsigned char edpRev = 0;
+       enum dc_status result = DC_OK;
+
+       /* Use an absolute timestamp to constrain max T7. */
+       unsigned long long enter_timestamp = 0;
+       unsigned long long finish_timestamp = 0;
+       unsigned long long time_taken_in_ns = 0;
+
+       result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+
+       if (result == DC_OK && edpRev >= DP_EDP_12) {
+               /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+               enter_timestamp = dm_get_timestamp(link->ctx);
+               do {
+                       sinkstatus = 0;
+                       result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+                       if (sinkstatus == 1)
+                               break;
+                       if (result != DC_OK)
+                               break;
+                       udelay(25);
+                       finish_timestamp = dm_get_timestamp(link->ctx);
+                       time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+               } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50ms
+       }
+
+       if (link->local_sink &&
+                       link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
+               udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
+
+       return result;
+}
+
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+               enum signal_type signal)
+{
+       struct dc  *dc = link->ctx->dc;
+       struct dmcu *dmcu = dc->res_pool->dmcu;
+       const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+
+       if (!link->wa_flags.dp_keep_receiver_powered)
+               dp_receiver_power_ctrl(link, false);
+
+       if (signal == SIGNAL_TYPE_EDP) {
+               if (link->dc->hwss.edp_backlight_control)
+                       link->dc->hwss.edp_backlight_control(link, false);
+               if (link_hwss->ext.disable_dp_link_output)
+                       link_hwss->ext.disable_dp_link_output(link, link_res, signal);
+               link->dc->hwss.edp_power_control(link, false);
+       } else {
+               if (dmcu != NULL && dmcu->funcs->lock_phy)
+                       dmcu->funcs->lock_phy(dmcu);
+               if (link_hwss->ext.disable_dp_link_output)
+                       link_hwss->ext.disable_dp_link_output(link, link_res, signal);
+               if (dmcu != NULL && dmcu->funcs->unlock_phy)
+                       dmcu->funcs->unlock_phy(dmcu);
+       }
+
+       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
+       /* Clear current link setting. */
+       memset(&link->cur_link_settings, 0,
+                       sizeof(link->cur_link_settings));
+
+       if (dc->clk_mgr->funcs->notify_link_rate_change)
+               dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+}
+
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+               enum signal_type signal)
+{
+       /* For MST, disable the link only when no stream uses it */
+       if (link->mst_stream_alloc_table.stream_count > 0)
+               return;
+
+       dp_disable_link_phy(link, link_res, signal);
+
+       /* set the sink to SST mode after disabling the link */
+       dp_enable_mst_on_sink(link, false);
+}
+
+bool dp_set_hw_training_pattern(
+       struct dc_link *link,
+       const struct link_resource *link_res,
+       enum dc_dp_training_pattern pattern,
+       uint32_t offset)
+{
+       enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+       switch (pattern) {
+       case DP_TRAINING_PATTERN_SEQUENCE_1:
+               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+               break;
+       case DP_TRAINING_PATTERN_SEQUENCE_2:
+               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+               break;
+       case DP_TRAINING_PATTERN_SEQUENCE_3:
+               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+               break;
+       case DP_TRAINING_PATTERN_SEQUENCE_4:
+               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+               break;
+       case DP_128b_132b_TPS1:
+               test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
+               break;
+       case DP_128b_132b_TPS2:
+               test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
+               break;
+       default:
+               break;
+       }
+
+       dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
+
+       return true;
+}
+
+void dp_set_hw_lane_settings(
+       struct dc_link *link,
+       const struct link_resource *link_res,
+       const struct link_training_settings *link_settings,
+       uint32_t offset)
+{
+       const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+
+       if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
+               return;
+
+       if (link_hwss->ext.set_dp_lane_settings)
+               link_hwss->ext.set_dp_lane_settings(link, link_res,
+                               &link_settings->link_settings,
+                               link_settings->hw_lane_settings);
+
+       memmove(link->cur_lane_setting,
+                       link_settings->hw_lane_settings,
+                       sizeof(link->cur_lane_setting));
+}
+
+void dp_set_hw_test_pattern(
+       struct dc_link *link,
+       const struct link_resource *link_res,
+       enum dp_test_pattern test_pattern,
+       uint8_t *custom_pattern,
+       uint32_t custom_pattern_size)
+{
+       const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+       struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+
+       pattern_param.dp_phy_pattern = test_pattern;
+       pattern_param.custom_pattern = custom_pattern;
+       pattern_param.custom_pattern_size = custom_pattern_size;
+       pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+       if (link_hwss->ext.set_dp_link_test_pattern)
+               link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param);
+}
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+                       struct dc_link_settings *link_setting,
+                       bool skip_video_pattern)
+{
+       struct pipe_ctx *pipes =
+                       &link->dc->current_state->res_ctx.pipe_ctx[0];
+       unsigned int i;
+
+       for (i = 0; i < MAX_PIPES; i++) {
+               if (pipes[i].stream != NULL &&
+                       !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
+                       pipes[i].stream->link != NULL &&
+                       pipes[i].stream_res.stream_enc != NULL &&
+                       pipes[i].stream->link == link) {
+                       udelay(100);
+
+                       pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
+                                       pipes[i].stream_res.stream_enc);
+
+                       /* disable any test pattern that might be active */
+                       dp_set_hw_test_pattern(link, &pipes[i].link_res,
+                                       DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+                       dp_receiver_power_ctrl(link, false);
+
+                       link->dc->hwss.disable_stream(&pipes[i]);
+                       if (pipes[i].stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
+                               pipes[i].stream_res.audio->funcs->az_disable(pipes[i].stream_res.audio);
+
+                       if (link->link_enc)
+                               link->link_enc->funcs->disable_output(
+                                               link->link_enc,
+                                               SIGNAL_TYPE_DISPLAY_PORT);
+
+                       /* Clear current link setting. */
+                       memset(&link->cur_link_settings, 0,
+                               sizeof(link->cur_link_settings));
+
+                       perform_link_training_with_retries(
+                                       link_setting,
+                                       skip_video_pattern,
+                                       LINK_TRAINING_ATTEMPTS,
+                                       &pipes[i],
+                                       SIGNAL_TYPE_DISPLAY_PORT,
+                                       false);
+
+                       link->dc->hwss.enable_stream(&pipes[i]);
+
+                       link->dc->hwss.unblank_stream(&pipes[i],
+                                       link_setting);
+
+                       if (pipes[i].stream_res.audio) {
+                               /* notify audio driver for
+                                * audio modes of monitor */
+                               pipes[i].stream_res.audio->funcs->az_enable(
+                                               pipes[i].stream_res.audio);
+
+                               /* un-mute audio */
+                               /* TODO: audio should be per stream rather than
+                                * per link */
+                               pipes[i].stream_res.stream_enc->funcs->
+                               audio_mute_control(
+                                       pipes[i].stream_res.stream_enc, false);
+                       }
+               }
+       }
+}
+
+#undef DC_LOGGER
+#define DC_LOGGER \
+       dsc->ctx->logger
+static void dsc_optc_config_log(struct display_stream_compressor *dsc,
+               struct dsc_optc_config *config)
+{
+       uint32_t precision = 1 << 28;
+       uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
+       uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
+       uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
+
+       /* 7 fractional decimal digits of precision for bytes per pixel is enough because DSC
+        * bits-per-pixel precision is 1/16th of a bit, which means bytes-per-pixel precision is
+        * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
+        */
+       ll_bytes_per_pix_fraq *= 10000000;
+       ll_bytes_per_pix_fraq /= precision;
+
+       DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
+                       config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
+       DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
+       DC_LOG_DSC("\tslice_width %d", config->slice_width);
+}
+
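Worked example for the U.28 fixed-point logging above: with config->bytes_per_pixel = 0x18000000, bytes_per_pixel_int = 0x18000000 / 2^28 = 1 and bytes_per_pixel_mod = 0x08000000, so ll_bytes_per_pix_fraq = 0x08000000 * 10000000 / 2^28 = 5000000 and the log line reads "bytes_per_pixel 0x18000000 (1.5000000)", i.e. 1.5 bytes per pixel.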
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
+       bool result = false;
+
+       if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+               result = true;
+       else
+               result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
+       return result;
+}
+
+/* The stream with these settings can be sent (unblanked) only after DSC has been enabled on the RX first,
+ * i.e. after dp_set_dsc_on_rx() has been called.
+ */
+void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+{
+       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
+       struct pipe_ctx *odm_pipe;
+       int opp_cnt = 1;
+
+       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+               opp_cnt++;
+
+       if (enable) {
+               struct dsc_config dsc_cfg;
+               struct dsc_optc_config dsc_optc_cfg;
+               enum optc_dsc_mode optc_dsc_mode;
+
+               /* Enable DSC hw block */
+               dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+               dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+               dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+               dsc_cfg.color_depth = stream->timing.display_color_depth;
+               dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+               dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+               ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
+               dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+               dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
+               dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+               for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+                       struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+
+                       odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
+                       odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+               }
+               dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
+               dsc_cfg.pic_width *= opp_cnt;
+
+               optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+
+               /* Enable DSC in encoder */
+               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+                               && !is_dp_128b_132b_signal(pipe_ctx)) {
+                       DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
+                       dsc_optc_config_log(dsc, &dsc_optc_cfg);
+                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
+                                                                       optc_dsc_mode,
+                                                                       dsc_optc_cfg.bytes_per_pixel,
+                                                                       dsc_optc_cfg.slice_width);
+
+                       /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
+               }
+
+               /* Enable DSC in OPTC */
+               DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+               dsc_optc_config_log(dsc, &dsc_optc_cfg);
+               pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
+                                                       optc_dsc_mode,
+                                                       dsc_optc_cfg.bytes_per_pixel,
+                                                       dsc_optc_cfg.slice_width);
+       } else {
+               /* disable DSC in OPTC */
+               pipe_ctx->stream_res.tg->funcs->set_dsc_config(
+                               pipe_ctx->stream_res.tg,
+                               OPTC_DSC_DISABLED, 0, 0);
+
+               /* disable DSC in stream encoder */
+               if (dc_is_dp_signal(stream->signal)) {
+                       if (is_dp_128b_132b_signal(pipe_ctx))
+                               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                                                               pipe_ctx->stream_res.hpo_dp_stream_enc,
+                                                                               false,
+                                                                               NULL,
+                                                                               true);
+                       else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+                                               pipe_ctx->stream_res.stream_enc,
+                                               OPTC_DSC_DISABLED, 0, 0);
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                                       pipe_ctx->stream_res.stream_enc, false, NULL, true);
+                       }
+               }
+
+               /* disable DSC block */
+               pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+               for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+                       odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
+       }
+}
+
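Concrete example of the ODM split above: for a 3840-pixel-wide timing with num_slices_h = 8 and one ODM companion pipe (opp_cnt = 2), each DSC instance is programmed with pic_width = 1920 and num_slices_h = 4; the ASSERT guarantees the slice count divides evenly, and the two multiplications at the end restore dsc_cfg to full-stream values once every instance has been configured.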
+bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
+{
+       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+       bool result = false;
+
+       if (!pipe_ctx->stream->timing.flags.DSC)
+               goto out;
+       if (!dsc)
+               goto out;
+
+       if (enable) {
+               dp_set_dsc_on_stream(pipe_ctx, true);
+               result = true;
+       } else {
+               dp_set_dsc_on_rx(pipe_ctx, false);
+               dp_set_dsc_on_stream(pipe_ctx, false);
+               result = true;
+       }
+out:
+       return result;
+}
+
+/*
+ * For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled;
+ * hence the PPS info packet update needs to use a frame update instead of an immediate update.
+ * The parameter immediate_update was added for this purpose.
+ * The decision to use a frame update is hard-coded in dp_update_dsc_config(),
+ * which is the only place that passes "false" for immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
+ */
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
+{
+       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+       struct dc_stream_state *stream = pipe_ctx->stream;
+
+       if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+               return false;
+
+       if (enable) {
+               struct dsc_config dsc_cfg;
+               uint8_t dsc_packed_pps[128];
+
+               memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+               memset(dsc_packed_pps, 0, 128);
+
+               /* Enable DSC hw block */
+               dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+               dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+               dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+               dsc_cfg.color_depth = stream->timing.display_color_depth;
+               dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+               dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+
+               DC_LOG_DSC(" ");
+               dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
+               if (dc_is_dp_signal(stream->signal)) {
+                       DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
+                       if (is_dp_128b_132b_signal(pipe_ctx))
+                               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                                                               pipe_ctx->stream_res.hpo_dp_stream_enc,
+                                                                               true,
+                                                                               &dsc_packed_pps[0],
+                                                                               immediate_update);
+                       else
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                               pipe_ctx->stream_res.stream_enc,
+                                               true,
+                                               &dsc_packed_pps[0],
+                                               immediate_update);
+               }
+       } else {
+               /* disable DSC PPS in stream encoder */
+               if (dc_is_dp_signal(stream->signal)) {
+                       if (is_dp_128b_132b_signal(pipe_ctx))
+                               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                                                               pipe_ctx->stream_res.hpo_dp_stream_enc,
+                                                                               false,
+                                                                               NULL,
+                                                                               true);
+                       else
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                               pipe_ctx->stream_res.stream_enc, false, NULL, true);
+               }
+       }
+
+       return true;
+}
+
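For context, the 128-byte buffer above is the size of the DSC Picture Parameter Set (PPS) defined by the VESA DSC specification; dsc_get_packed_pps() serializes the current configuration into it, and the stream encoder transmits it to the sink as an SDP so the receiver can program its decoder to match.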
+bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
+{
+       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+       if (!pipe_ctx->stream->timing.flags.DSC)
+               return false;
+       if (!dsc)
+               return false;
+
+       dp_set_dsc_on_stream(pipe_ctx, true);
+       dp_set_dsc_pps_sdp(pipe_ctx, true, false);
+       return true;
+}
+
+#undef DC_LOGGER
+#define DC_LOGGER \
+       link->ctx->logger
index a55944da8d53fbb18da1d9e237c21b9234163e09..047c626a4a340e5c3196d121fe5857d51ea8a40b 100644 (file)
@@ -122,6 +122,7 @@ static void remove_link_enc_assignment(
                                stream->link_enc = NULL;
                                state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
                                state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
+                               dc_stream_release(stream);
                                break;
                        }
                }
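The added dc_stream_release() balances the reference the assignment path holds on the stream (presumably taken via dc_stream_retain() when the assignment was created, outside this diff), so removing a link encoder assignment no longer leaks a stream reference.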
@@ -486,7 +487,8 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
        }
 
        for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
-               if (encs_assigned[i] == ENGINE_ID_UNKNOWN) {
+               if (encs_assigned[i] == ENGINE_ID_UNKNOWN &&
+                               dc->res_pool->link_encoders[i] != NULL) {
                        link_enc = dc->res_pool->link_encoders[i];
                        break;
                }
@@ -506,6 +508,26 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
        return link_enc;
 }
 
+struct link_encoder *link_enc_cfg_get_link_enc(
+               const struct dc_link *link)
+{
+       struct link_encoder *link_enc = NULL;
+
+       /* Links supporting dynamically assigned link encoder will be assigned next
+        * available encoder if one not already assigned.
+        */
+       if (link->is_dig_mapping_flexible &&
+           link->dc->res_pool->funcs->link_encs_assign) {
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+               if (link_enc == NULL)
+                       link_enc = link_enc_cfg_get_next_avail_link_enc(
+                               link->ctx->dc);
+       } else
+               link_enc = link->link_enc;
+
+       return link_enc;
+}
+
 bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link)
 {
        bool is_avail = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
deleted file mode 100644 (file)
index 45d03d3..0000000
+++ /dev/null
@@ -1,917 +0,0 @@
-/* Copyright 2015 Advanced Micro Devices, Inc. */
-
-
-#include "dm_services.h"
-#include "dc.h"
-#include "inc/core_types.h"
-#include "include/ddc_service_types.h"
-#include "include/i2caux_interface.h"
-#include "link_hwss.h"
-#include "hw_sequencer.h"
-#include "dc_link_dp.h"
-#include "dc_link_ddc.h"
-#include "dm_helpers.h"
-#include "dpcd_defs.h"
-#include "dsc.h"
-#include "resource.h"
-#include "link_enc_cfg.h"
-#include "clk_mgr.h"
-#include "inc/link_dpcd.h"
-#include "dccg.h"
-
-static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
-{
-       switch (lttpr_repeater_count) {
-       case 0x80: // 1 lttpr repeater
-               return 1;
-       case 0x40: // 2 lttpr repeaters
-               return 2;
-       case 0x20: // 3 lttpr repeaters
-               return 3;
-       case 0x10: // 4 lttpr repeaters
-               return 4;
-       case 0x08: // 5 lttpr repeaters
-               return 5;
-       case 0x04: // 6 lttpr repeaters
-               return 6;
-       case 0x02: // 7 lttpr repeaters
-               return 7;
-       case 0x01: // 8 lttpr repeaters
-               return 8;
-       default:
-               break;
-       }
-       return 0; // invalid value
-}
-
-static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
-{
-       return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
-}
-
-void dp_receiver_power_ctrl(struct dc_link *link, bool on)
-{
-       uint8_t state;
-
-       state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
-
-       if (link->sync_lt_in_progress)
-               return;
-
-       core_link_write_dpcd(link, DP_SET_POWER, &state,
-                       sizeof(state));
-}
-
-void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
-{
-       if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
-               core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
-                                       &dp_test_mode, sizeof(dp_test_mode));
-}
-
-void dp_enable_link_phy(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       enum signal_type signal,
-       enum clock_source_id clock_source,
-       const struct dc_link_settings *link_settings)
-{
-       struct link_encoder *link_enc;
-       struct dc  *dc = link->ctx->dc;
-       struct dmcu *dmcu = dc->res_pool->dmcu;
-
-       struct pipe_ctx *pipes =
-                       link->dc->current_state->res_ctx.pipe_ctx;
-       struct clock_source *dp_cs =
-                       link->dc->res_pool->dp_clock_source;
-       unsigned int i;
-
-       /* Link should always be assigned encoder when en-/disabling. */
-       if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
-       else
-               link_enc = link->link_enc;
-       ASSERT(link_enc);
-
-       if (link->connector_signal == SIGNAL_TYPE_EDP) {
-               link->dc->hwss.edp_power_control(link, true);
-               link->dc->hwss.edp_wait_for_hpd_ready(link, true);
-       }
-
-       /* If the current pixel clock source is not DTO(happens after
-        * switching from HDMI passive dongle to DP on the same connector),
-        * switch the pixel clock source to DTO.
-        */
-       for (i = 0; i < MAX_PIPES; i++) {
-               if (pipes[i].stream != NULL &&
-                       pipes[i].stream->link == link) {
-                       if (pipes[i].clock_source != NULL &&
-                                       pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
-                               pipes[i].clock_source = dp_cs;
-                               pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz =
-                                               pipes[i].stream->timing.pix_clk_100hz;
-                               pipes[i].clock_source->funcs->program_pix_clk(
-                                                       pipes[i].clock_source,
-                                                       &pipes[i].stream_res.pix_clk_params,
-                                                       &pipes[i].pll_settings);
-                       }
-               }
-       }
-
-       link->cur_link_settings = *link_settings;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
-               /* TODO - DP2.0 HW: notify link rate change here */
-       } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
-               if (dc->clk_mgr->funcs->notify_link_rate_change)
-                       dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-       }
-#else
-       if (dc->clk_mgr->funcs->notify_link_rate_change)
-               dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-#endif
-       if (dmcu != NULL && dmcu->funcs->lock_phy)
-               dmcu->funcs->lock_phy(dmcu);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
-               enable_dp_hpo_output(link, link_res, link_settings);
-       } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
-               if (dc_is_dp_sst_signal(signal)) {
-                       link_enc->funcs->enable_dp_output(
-                                                       link_enc,
-                                                       link_settings,
-                                                       clock_source);
-               } else {
-                       link_enc->funcs->enable_dp_mst_output(
-                                                       link_enc,
-                                                       link_settings,
-                                                       clock_source);
-               }
-       }
-#else
-       if (dc_is_dp_sst_signal(signal)) {
-               link_enc->funcs->enable_dp_output(
-                                               link_enc,
-                                               link_settings,
-                                               clock_source);
-       } else {
-               link_enc->funcs->enable_dp_mst_output(
-                                               link_enc,
-                                               link_settings,
-                                               clock_source);
-       }
-#endif
-       if (dmcu != NULL && dmcu->funcs->unlock_phy)
-               dmcu->funcs->unlock_phy(dmcu);
-
-       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
-       dp_receiver_power_ctrl(link, true);
-}
-
-void edp_add_delay_for_T9(struct dc_link *link)
-{
-       if (link->local_sink &&
-                       link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
-               udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
-}
-
-bool edp_receiver_ready_T9(struct dc_link *link)
-{
-       unsigned int tries = 0;
-       unsigned char sinkstatus = 0;
-       unsigned char edpRev = 0;
-       enum dc_status result;
-
-       result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-
-    /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
-       if (result == DC_OK && edpRev >= DP_EDP_12) {
-               do {
-                       sinkstatus = 1;
-                       result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
-                       if (sinkstatus == 0)
-                               break;
-                       if (result != DC_OK)
-                               break;
-                       udelay(100); //MAx T9
-               } while (++tries < 50);
-       }
-
-       return result;
-}
-bool edp_receiver_ready_T7(struct dc_link *link)
-{
-       unsigned char sinkstatus = 0;
-       unsigned char edpRev = 0;
-       enum dc_status result;
-
-       /* use absolute time stamp to constrain max T7*/
-       unsigned long long enter_timestamp = 0;
-       unsigned long long finish_timestamp = 0;
-       unsigned long long time_taken_in_ns = 0;
-
-       result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-
-       if (result == DC_OK && edpRev >= DP_EDP_12) {
-               /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
-               enter_timestamp = dm_get_timestamp(link->ctx);
-               do {
-                       sinkstatus = 0;
-                       result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
-                       if (sinkstatus == 1)
-                               break;
-                       if (result != DC_OK)
-                               break;
-                       udelay(25);
-                       finish_timestamp = dm_get_timestamp(link->ctx);
-                       time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
-               } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
-       }
-
-       if (link->local_sink &&
-                       link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
-               udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
-
-       return result;
-}
-
-void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
-               enum signal_type signal)
-{
-       struct dc  *dc = link->ctx->dc;
-       struct dmcu *dmcu = dc->res_pool->dmcu;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc;
-#endif
-       struct link_encoder *link_enc;
-
-       /* Link should always be assigned encoder when en-/disabling. */
-       if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
-       else
-               link_enc = link->link_enc;
-       ASSERT(link_enc);
-
-       if (!link->wa_flags.dp_keep_receiver_powered)
-               dp_receiver_power_ctrl(link, false);
-
-       if (signal == SIGNAL_TYPE_EDP) {
-               if (link->dc->hwss.edp_backlight_control)
-                       link->dc->hwss.edp_backlight_control(link, false);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
-                       disable_dp_hpo_output(link, link_res, signal);
-               else
-                       link_enc->funcs->disable_output(link_enc, signal);
-#else
-               link_enc->funcs->disable_output(link_enc, signal);
-#endif
-               link->dc->hwss.edp_power_control(link, false);
-       } else {
-               if (dmcu != NULL && dmcu->funcs->lock_phy)
-                       dmcu->funcs->lock_phy(dmcu);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
-                               hpo_link_enc)
-                       disable_dp_hpo_output(link, link_res, signal);
-               else
-                       link_enc->funcs->disable_output(link_enc, signal);
-#else
-               link_enc->funcs->disable_output(link_enc, signal);
-#endif
-               if (dmcu != NULL && dmcu->funcs->unlock_phy)
-                       dmcu->funcs->unlock_phy(dmcu);
-       }
-
-       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
-
-       /* Clear current link setting.*/
-       memset(&link->cur_link_settings, 0,
-                       sizeof(link->cur_link_settings));
-
-       if (dc->clk_mgr->funcs->notify_link_rate_change)
-               dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-}
-
-void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
-               enum signal_type signal)
-{
-       /* MST disable link only when no stream use the link */
-       if (link->mst_stream_alloc_table.stream_count > 0)
-               return;
-
-       dp_disable_link_phy(link, link_res, signal);
-
-       /* set the sink to SST mode after disabling the link */
-       dp_enable_mst_on_sink(link, false);
-}
-
-bool dp_set_hw_training_pattern(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       enum dc_dp_training_pattern pattern,
-       uint32_t offset)
-{
-       enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
-
-       switch (pattern) {
-       case DP_TRAINING_PATTERN_SEQUENCE_1:
-               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
-               break;
-       case DP_TRAINING_PATTERN_SEQUENCE_2:
-               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
-               break;
-       case DP_TRAINING_PATTERN_SEQUENCE_3:
-               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
-               break;
-       case DP_TRAINING_PATTERN_SEQUENCE_4:
-               test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
-               break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       case DP_128b_132b_TPS1:
-               test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
-               break;
-       case DP_128b_132b_TPS2:
-               test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
-               break;
-#endif
-       default:
-               break;
-       }
-
-       dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
-
-       return true;
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-#define DC_LOGGER \
-       link->ctx->logger
-#endif
-void dp_set_hw_lane_settings(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       const struct link_training_settings *link_settings,
-       uint32_t offset)
-{
-       struct link_encoder *encoder = link->link_enc;
-
-       if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
-               return;
-
-       /* call Encoder to set lane settings */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (dp_get_link_encoding_format(&link_settings->link_settings) ==
-                       DP_128b_132b_ENCODING) {
-               link_res->hpo_dp_link_enc->funcs->set_ffe(
-                               link_res->hpo_dp_link_enc,
-                               &link_settings->link_settings,
-                               link_settings->lane_settings[0].FFE_PRESET.raw);
-       } else if (dp_get_link_encoding_format(&link_settings->link_settings)
-                       == DP_8b_10b_ENCODING) {
-               encoder->funcs->dp_set_lane_settings(encoder, link_settings);
-       }
-#else
-       encoder->funcs->dp_set_lane_settings(encoder, link_settings);
-#endif
-       memmove(link->cur_lane_setting,
-                       link_settings->lane_settings,
-                       sizeof(link->cur_lane_setting));
-}
-
-void dp_set_hw_test_pattern(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       enum dp_test_pattern test_pattern,
-       uint8_t *custom_pattern,
-       uint32_t custom_pattern_size)
-{
-       struct encoder_set_dp_phy_pattern_param pattern_param = {0};
-       struct link_encoder *encoder;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings);
-#endif
-
-       /* Access link encoder based on whether it is statically
-        * or dynamically assigned to a link.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign)
-               encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-       else
-               encoder = link->link_enc;
-
-       pattern_param.dp_phy_pattern = test_pattern;
-       pattern_param.custom_pattern = custom_pattern;
-       pattern_param.custom_pattern_size = custom_pattern_size;
-       pattern_param.dp_panel_mode = dp_get_panel_mode(link);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       switch (link_encoding_format) {
-       case DP_128b_132b_ENCODING:
-               link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
-                               link_res->hpo_dp_link_enc, &pattern_param);
-               break;
-       case DP_8b_10b_ENCODING:
-               ASSERT(encoder);
-               encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
-               break;
-       default:
-               DC_LOG_ERROR("%s: Unknown link encoding format.", __func__);
-               break;
-       }
-#else
-       encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
-#endif
-       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
-}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-#undef DC_LOGGER
-#endif
-
-void dp_retrain_link_dp_test(struct dc_link *link,
-                       struct dc_link_settings *link_setting,
-                       bool skip_video_pattern)
-{
-       struct pipe_ctx *pipes =
-                       &link->dc->current_state->res_ctx.pipe_ctx[0];
-       unsigned int i;
-
-       for (i = 0; i < MAX_PIPES; i++) {
-               if (pipes[i].stream != NULL &&
-                       !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
-                       pipes[i].stream->link != NULL &&
-                       pipes[i].stream_res.stream_enc != NULL &&
-                       pipes[i].stream->link == link) {
-                       udelay(100);
-
-                       pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
-                                       pipes[i].stream_res.stream_enc);
-
-                       /* disable any test pattern that might be active */
-                       dp_set_hw_test_pattern(link, &pipes[i].link_res,
-                                       DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
-                       dp_receiver_power_ctrl(link, false);
-
-                       link->dc->hwss.disable_stream(&pipes[i]);
-                       if (pipes[i].stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
-                               pipes[i].stream_res.audio->funcs->az_disable(pipes[i].stream_res.audio);
-
-                       if (link->link_enc)
-                               link->link_enc->funcs->disable_output(
-                                               link->link_enc,
-                                               SIGNAL_TYPE_DISPLAY_PORT);
-
-                       /* Clear current link setting. */
-                       memset(&link->cur_link_settings, 0,
-                               sizeof(link->cur_link_settings));
-
-                       perform_link_training_with_retries(
-                                       link_setting,
-                                       skip_video_pattern,
-                                       LINK_TRAINING_ATTEMPTS,
-                                       &pipes[i],
-                                       SIGNAL_TYPE_DISPLAY_PORT,
-                                       false);
-
-                       link->dc->hwss.enable_stream(&pipes[i]);
-
-                       link->dc->hwss.unblank_stream(&pipes[i],
-                                       link_setting);
-
-                       if (pipes[i].stream_res.audio) {
-                               /* notify the audio driver of the
-                                * monitor's audio modes */
-                               pipes[i].stream_res.audio->funcs->az_enable(
-                                               pipes[i].stream_res.audio);
-
-                               /* un-mute audio */
-                               /* TODO: audio should be per stream rather than
-                                * per link */
-                               pipes[i].stream_res.stream_enc->funcs->audio_mute_control(
-                                               pipes[i].stream_res.stream_enc, false);
-                       }
-               }
-       }
-}
-
-#define DC_LOGGER \
-       dsc->ctx->logger
-static void dsc_optc_config_log(struct display_stream_compressor *dsc,
-               struct dsc_optc_config *config)
-{
-       uint32_t precision = 1 << 28;
-       uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
-       uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
-       uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
-
-       /* 7 fractional decimal digits for bytes per pixel is enough because DSC
-        * bits-per-pixel precision is 1/16th of a bit, which makes bytes-per-pixel
-        * precision 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
-        */
-       ll_bytes_per_pix_fraq *= 10000000;
-       ll_bytes_per_pix_fraq /= precision;
-
-       DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
-                       config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
-       DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
-       DC_LOG_DSC("\tslice_width %d", config->slice_width);
-}
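
For reference, the U4.28 fixed-point decode performed by dsc_optc_config_log() can be checked in isolation. A minimal standalone sketch; the 1.5 bytes/pixel input is a made-up value, not real DSC output:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t precision = 1 << 28;           /* 28 fractional bits (U4.28) */
	uint32_t bytes_per_pixel = 0x18000000;  /* 1.5 bytes/pixel in U4.28 */
	uint32_t int_part = bytes_per_pixel / precision;
	uint64_t frac = (uint64_t)(bytes_per_pixel % precision) * 10000000 / precision;

	/* prints "1.5000000"; seven fractional digits, as logged above */
	printf("%u.%07u\n", int_part, (uint32_t)frac);
	return 0;
}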
-
-bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
-{
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       bool result = false;
-
-       if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
-               result = true;
-       else
-               result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
-       return result;
-}
-
-/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
- * i.e. after dp_set_dsc_on_rx() has been called
- */
-void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
-{
-       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       struct pipe_ctx *odm_pipe;
-       int opp_cnt = 1;
-
-       for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
-               opp_cnt++;
-
-       if (enable) {
-               struct dsc_config dsc_cfg;
-               struct dsc_optc_config dsc_optc_cfg;
-               enum optc_dsc_mode optc_dsc_mode;
-
-               /* Enable DSC hw block */
-               dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
-               dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
-               dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
-               dsc_cfg.color_depth = stream->timing.display_color_depth;
-               dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
-               dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
-               ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
-               dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
-
-               dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
-               dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
-               for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
-                       struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
-
-                       odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
-                       odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
-               }
-               dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
-               dsc_cfg.pic_width *= opp_cnt;
-
-               optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
-
-               /* Enable DSC in encoder */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
-                               && !is_dp_128b_132b_signal(pipe_ctx)) {
-#else
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-#endif
-                       DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
-                       dsc_optc_config_log(dsc, &dsc_optc_cfg);
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
-                                                                       optc_dsc_mode,
-                                                                       dsc_optc_cfg.bytes_per_pixel,
-                                                                       dsc_optc_cfg.slice_width);
-
-                       /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
-               }
-
-               /* Enable DSC in OPTC */
-               DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
-               dsc_optc_config_log(dsc, &dsc_optc_cfg);
-               pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
-                                                       optc_dsc_mode,
-                                                       dsc_optc_cfg.bytes_per_pixel,
-                                                       dsc_optc_cfg.slice_width);
-       } else {
-               /* disable DSC in OPTC */
-               pipe_ctx->stream_res.tg->funcs->set_dsc_config(
-                               pipe_ctx->stream_res.tg,
-                               OPTC_DSC_DISABLED, 0, 0);
-
-               /* disable DSC in stream encoder */
-               if (dc_is_dp_signal(stream->signal)) {
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-                       if (is_dp_128b_132b_signal(pipe_ctx))
-                               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                                                               pipe_ctx->stream_res.hpo_dp_stream_enc,
-                                                                               false,
-                                                                               NULL,
-                                                                               true);
-                       else
-#endif
-                               if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-                                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
-                                                       pipe_ctx->stream_res.stream_enc,
-                                                       OPTC_DSC_DISABLED, 0, 0);
-                                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                                               pipe_ctx->stream_res.stream_enc, false, NULL, true);
-                               }
-               }
-
-               /* disable DSC block */
-               pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
-               for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
-                       odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
-       }
-}
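
To make the ODM bookkeeping in dp_set_dsc_on_stream() concrete, here is a small standalone sketch with hypothetical numbers (a 7680-wide timing, 8 horizontal slices, 2 ODM segments); it mirrors the divide-then-multiply-back pattern and the divisibility ASSERT above:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int pic_width = 7680, num_slices_h = 8, opp_cnt = 2; /* hypothetical */

	assert(num_slices_h % opp_cnt == 0); /* slices must split evenly */

	/* each ODM segment's DSC instance gets its share of the picture... */
	printf("per segment: width %d, slices %d\n",
			pic_width / opp_cnt, num_slices_h / opp_cnt); /* 3840, 4 */

	/* ...and the totals are restored afterwards for OPTC programming */
	printf("stream total: width %d, slices %d\n", pic_width, num_slices_h);
	return 0;
}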
-
-bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
-{
-       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-       bool result = false;
-
-       if (!pipe_ctx->stream->timing.flags.DSC)
-               goto out;
-       if (!dsc)
-               goto out;
-
-       if (enable) {
-               dp_set_dsc_on_stream(pipe_ctx, true);
-               result = true;
-       } else {
-               dp_set_dsc_on_rx(pipe_ctx, false);
-               dp_set_dsc_on_stream(pipe_ctx, false);
-               result = true;
-       }
-out:
-       return result;
-}
-
-/*
- * For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled;
- * hence the PPS info packet update needs to use a frame update instead of an immediate update.
- * The parameter immediate_update was added for this purpose.
- * The decision to use a frame update is hard-coded in function dp_update_dsc_config(),
- * which is the only place where "false" is passed in for param immediate_update.
- *
- * immediate_update is only applicable when DSC is enabled.
- */
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
-{
-       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-       struct dc_stream_state *stream = pipe_ctx->stream;
-
-       if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
-               return false;
-
-       if (enable) {
-               struct dsc_config dsc_cfg;
-               uint8_t dsc_packed_pps[128];
-
-               memset(&dsc_cfg, 0, sizeof(dsc_cfg));
-               memset(dsc_packed_pps, 0, 128);
-
-               /* Enable DSC hw block */
-               dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-               dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
-               dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
-               dsc_cfg.color_depth = stream->timing.display_color_depth;
-               dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
-               dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
-
-               DC_LOG_DSC(" ");
-               dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
-               if (dc_is_dp_signal(stream->signal)) {
-                       DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-                       if (is_dp_128b_132b_signal(pipe_ctx))
-                               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                                                               pipe_ctx->stream_res.hpo_dp_stream_enc,
-                                                                               true,
-                                                                               &dsc_packed_pps[0],
-                                                                               immediate_update);
-                       else
-#endif
-                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                                                               pipe_ctx->stream_res.stream_enc,
-                                                                               true,
-                                                                               &dsc_packed_pps[0],
-                                                                               immediate_update);
-               }
-       } else {
-               /* disable DSC PPS in stream encoder */
-               if (dc_is_dp_signal(stream->signal)) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-                       if (is_dp_128b_132b_signal(pipe_ctx))
-                               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                                                               pipe_ctx->stream_res.hpo_dp_stream_enc,
-                                                                               false,
-                                                                               NULL,
-                                                                               true);
-                       else
-#endif
-                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                                       pipe_ctx->stream_res.stream_enc, false, NULL, true);
-               }
-       }
-
-       return true;
-}
-
-
-bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
-{
-       struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-
-       if (!pipe_ctx->stream->timing.flags.DSC)
-               return false;
-       if (!dsc)
-               return false;
-
-       dp_set_dsc_on_stream(pipe_ctx, true);
-       dp_set_dsc_pps_sdp(pipe_ctx, true, false);
-       return true;
-}
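
A usage note on the immediate_update parameter described above; a sketch of the two call patterns, not code from this patch:

/* dynamic bpp change: DSC is reprogrammed with MASTER_UPDATE_LOCK held,
 * so the PPS SDP must latch on a frame update; dp_update_dsc_config()
 * is the only caller that passes false
 */
dp_set_dsc_pps_sdp(pipe_ctx, true, false);

/* every other enable path applies the PPS SDP immediately */
dp_set_dsc_pps_sdp(pipe_ctx, true, true);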
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-#undef DC_LOGGER
-#define DC_LOGGER \
-       link->ctx->logger
-
-static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
-{
-       switch (link->link_enc->transmitter) {
-       case TRANSMITTER_UNIPHY_A:
-               return PHYD32CLKA;
-       case TRANSMITTER_UNIPHY_B:
-               return PHYD32CLKB;
-       case TRANSMITTER_UNIPHY_C:
-               return PHYD32CLKC;
-       case TRANSMITTER_UNIPHY_D:
-               return PHYD32CLKD;
-       case TRANSMITTER_UNIPHY_E:
-               return PHYD32CLKE;
-       default:
-               return PHYD32CLKA;
-       }
-}
-
-void enable_dp_hpo_output(struct dc_link *link,
-               const struct link_resource *link_res,
-               const struct dc_link_settings *link_settings)
-{
-       const struct dc *dc = link->dc;
-       enum phyd32clk_clock_source phyd32clk;
-
-       /* Enable PHY PLL at target bit rate
-        *   UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz)
-        *   UHBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz)
-        *   UHBR20 = 20Gbps (SYMCLK32 = 625MHz)
-        */
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-               switch (link_settings->link_rate) {
-               case LINK_RATE_UHBR10:
-                       dm_set_phyd32clk(dc->ctx, 312500);
-                       break;
-               case LINK_RATE_UHBR13_5:
-                       dm_set_phyd32clk(dc->ctx, 421875);
-                       break;
-               case LINK_RATE_UHBR20:
-                       dm_set_phyd32clk(dc->ctx, 625000);
-                       break;
-               default:
-                       return;
-               }
-       } else {
-               /* DP2.0 HW: call transmitter control to enable PHY */
-               link_res->hpo_dp_link_enc->funcs->enable_link_phy(
-                               link_res->hpo_dp_link_enc,
-                               link_settings,
-                               link->link_enc->transmitter,
-                               link->link_enc->hpd_source);
-       }
-
-       /* DCCG muxing and DTBCLK DTO */
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-               dc->res_pool->dccg->funcs->set_physymclk(
-                               dc->res_pool->dccg,
-                               link->link_enc_hw_inst,
-                               PHYSYMCLK_FORCE_SRC_PHYD32CLK,
-                               true);
-
-               phyd32clk = get_phyd32clk_src(link);
-               dc->res_pool->dccg->funcs->enable_symclk32_le(
-                               dc->res_pool->dccg,
-                               link_res->hpo_dp_link_enc->inst,
-                               phyd32clk);
-               link_res->hpo_dp_link_enc->funcs->link_enable(
-                               link_res->hpo_dp_link_enc,
-                               link_settings->lane_count);
-       }
-}
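
Sanity check on the frequencies in the comment above, assuming SYMCLK32 is the 32:1 symbol clock its name implies: 10.0 Gbps / 32 = 312.5 MHz, 13.5 Gbps / 32 = 421.875 MHz, and 20.0 Gbps / 32 = 625 MHz, which are exactly the kHz values passed to dm_set_phyd32clk() in the FPGA path.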
-
-void disable_dp_hpo_output(struct dc_link *link,
-               const struct link_resource *link_res,
-               enum signal_type signal)
-{
-       const struct dc *dc = link->dc;
-
-       link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
-
-       if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-               dc->res_pool->dccg->funcs->disable_symclk32_le(
-                                       dc->res_pool->dccg,
-                                       link_res->hpo_dp_link_enc->inst);
-
-               dc->res_pool->dccg->funcs->set_physymclk(
-                                       dc->res_pool->dccg,
-                                       link->link_enc_hw_inst,
-                                       PHYSYMCLK_FORCE_SRC_SYMCLK,
-                                       false);
-
-               dm_set_phyd32clk(dc->ctx, 0);
-       } else {
-               /* DP2.0 HW: call transmitter control to disable PHY */
-               link_res->hpo_dp_link_enc->funcs->disable_link_phy(
-                               link_res->hpo_dp_link_enc,
-                               signal);
-       }
-}
-
-void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable)
-{
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
-       struct pipe_ctx *odm_pipe;
-       int odm_combine_num_segments = 1;
-       enum phyd32clk_clock_source phyd32clk;
-
-       if (enable) {
-               for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
-                       odm_combine_num_segments++;
-
-               dc->res_pool->dccg->funcs->set_dpstreamclk(
-                               dc->res_pool->dccg,
-                               DTBCLK0,
-                               pipe_ctx->stream_res.tg->inst);
-
-               phyd32clk = get_phyd32clk_src(stream->link);
-               dc->res_pool->dccg->funcs->enable_symclk32_se(
-                               dc->res_pool->dccg,
-                               pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
-                               phyd32clk);
-
-               dc->res_pool->dccg->funcs->set_dtbclk_dto(
-                               dc->res_pool->dccg,
-                               pipe_ctx->stream_res.tg->inst,
-                               stream->phy_pix_clk,
-                               odm_combine_num_segments,
-                               &stream->timing);
-       } else {
-               dc->res_pool->dccg->funcs->set_dtbclk_dto(
-                               dc->res_pool->dccg,
-                               pipe_ctx->stream_res.tg->inst,
-                               0,
-                               0,
-                               &stream->timing);
-               dc->res_pool->dccg->funcs->disable_symclk32_se(
-                               dc->res_pool->dccg,
-                               pipe_ctx->stream_res.hpo_dp_stream_enc->inst);
-               dc->res_pool->dccg->funcs->set_dpstreamclk(
-                               dc->res_pool->dccg,
-                               REFCLK,
-                               pipe_ctx->stream_res.tg->inst);
-       }
-}
-
-void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link)
-{
-       const struct dc *dc = link->dc;
-       struct dc_state *state = dc->current_state;
-       uint8_t i;
-
-       for (i = 0; i < MAX_PIPES; i++) {
-               if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc &&
-                               state->res_ctx.pipe_ctx[i].stream &&
-                               state->res_ctx.pipe_ctx[i].stream->link == link &&
-                               !state->res_ctx.pipe_ctx[i].stream->dpms_off) {
-                       setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false);
-               }
-       }
-}
-
-#undef DC_LOGGER
-#endif
index b3912ff9dc91135b5aceb37972876a44bba7a42e..71b393194c55a819ca141e932d683ced0221147c 100644 (file)
 #include "dpcd_defs.h"
 #include "link_enc_cfg.h"
 #include "dc_link_dp.h"
+#include "virtual/virtual_link_hwss.h"
+#include "link/link_hwss_dio.h"
+#include "link/link_hwss_dpia.h"
+#include "link/link_hwss_hpo_dp.h"
 
 #if defined(CONFIG_DRM_AMD_DC_SI)
 #include "dce60/dce60_resource.h"
@@ -131,7 +135,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 
        case FAMILY_NV:
                dc_version = DCN_VERSION_2_0;
-               if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+               if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
                        dc_version = DCN_VERSION_2_01;
                        break;
                }
@@ -356,7 +360,6 @@ bool resource_construct(
                }
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        pool->hpo_dp_stream_enc_count = 0;
        if (create_funcs->create_hpo_dp_stream_encoder) {
                for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) {
@@ -377,7 +380,6 @@ bool resource_construct(
                        pool->hpo_dp_link_enc_count++;
                }
        }
-#endif
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        for (i = 0; i < caps->num_mpc_3dlut; i++) {
@@ -1640,6 +1642,9 @@ static bool are_stream_backends_same(
        if (is_timing_changed(stream_a, stream_b))
                return false;
 
+       if (stream_a->signal != stream_b->signal)
+               return false;
+
        if (stream_a->dpms_off != stream_b->dpms_off)
                return false;
 
@@ -1710,7 +1715,6 @@ static void update_stream_engine_usage(
        }
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void update_hpo_dp_stream_engine_usage(
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
@@ -1812,7 +1816,6 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
                pipe_ctx->link_res.hpo_dp_link_enc = NULL;
        }
 }
-#endif
 
 /* TODO: release audio object */
 void update_audio_usage(
@@ -1858,7 +1861,6 @@ static int acquire_first_free_pipe(
        return -1;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link(
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
@@ -1876,7 +1878,6 @@ static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for
 
        return NULL;
 }
-#endif
 
 static struct audio *find_first_free_audio(
                struct resource_context *res_ctx,
@@ -1964,11 +1965,6 @@ enum dc_status dc_remove_stream_from_ctx(
                                dc->res_pool,
                        del_pipe->stream_res.stream_enc,
                        false);
-       /* Release link encoder from stream in new dc_state. */
-       if (dc->res_pool->funcs->link_enc_unassign)
-               dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(del_pipe)) {
                update_hpo_dp_stream_engine_usage(
                        &new_ctx->res_ctx, dc->res_pool,
@@ -1976,7 +1972,6 @@ enum dc_status dc_remove_stream_from_ctx(
                        false);
                remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream);
        }
-#endif
 
        if (del_pipe->stream_res.audio)
                update_audio_usage(
@@ -2173,7 +2168,7 @@ static void mark_seamless_boot_stream(
 
        if (dc->config.allow_seamless_boot_optimization &&
                        !dcb->funcs->is_accelerated_mode(dcb)) {
-               if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+               if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
                        stream->apply_seamless_boot_optimization = true;
        }
 }
@@ -2229,7 +2224,6 @@ enum dc_status resource_map_pool_resources(
                pipe_ctx->stream_res.stream_enc,
                true);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* Allocate DP HPO Stream Encoder based on signal, hw capabilities
         * and link settings
         */
@@ -2254,7 +2248,6 @@ enum dc_status resource_map_pool_resources(
                                return DC_NO_LINK_ENC_RESOURCE;
                }
        }
-#endif
 
        /* TODO: Add check if ASIC support and EDID audio */
        if (!stream->converter_disable_audio &&
@@ -2924,12 +2917,10 @@ bool pipe_need_reprogram(
        if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
                return true;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
                return true;
        if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc)
                return true;
-#endif
 
        /* DIG link encoder resource assignment for stream changed. */
        if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
@@ -3196,10 +3187,9 @@ void get_audio_check(struct audio_info *aud_modes,
        }
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
+static struct hpo_dp_link_encoder *get_temp_hpo_dp_link_enc(
                const struct resource_context *res_ctx,
-               const struct resource_pool *pool,
+               const struct resource_pool *const pool,
                const struct dc_link *link)
 {
        struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL;
@@ -3215,7 +3205,24 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
 
        return hpo_dp_link_enc;
 }
-#endif
+
+bool get_temp_dp_link_res(struct dc_link *link,
+               struct link_resource *link_res,
+               struct dc_link_settings *link_settings)
+{
+       const struct dc *dc  = link->dc;
+       const struct resource_context *res_ctx = &dc->current_state->res_ctx;
+
+       memset(link_res, 0, sizeof(*link_res));
+
+       if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+               link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
+                               dc->res_pool, link);
+               if (!link_res->hpo_dp_link_enc)
+                       return false;
+       }
+       return true;
+}
 
 void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
                struct dc_state *context)
@@ -3303,3 +3310,36 @@ uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter tr
 #endif
        return phy_idx;
 }
+
+const struct link_hwss *get_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res)
+{
+       /* link_hwss is only accessible through this getter rather than via
+        * pointers in dc, with the intent of protecting against broken
+        * polymorphism.
+        */
+       if (can_use_hpo_dp_link_hwss(link, link_res))
+               /* TODO: some assume that if the decided link settings use the
+                * 128b/132b channel coding format, hpo_dp_link_enc should be used.
+                * Others believe that if hpo_dp_link_enc is available in the link
+                * resource, then hpo_dp_link_enc must be used. The bond between
+                * hpo_dp_link_enc != NULL and the decided link settings is loosely
+                * coupled, on the premise that both the hpo_dp_link_enc pointer
+                * and the decided link settings are determined by a single policy
+                * function such as "decide_link_settings" in an upper layer. That
+                * "convention" cannot be maintained or enforced at the current
+                * level, so a refactor is due to enforce a strong bond between
+                * those two parameters at this level.
+                *
+                * Put simply, we want enforcement at a low level so that we never
+                * return a link hwss when the caller plans to do 8b/10b with an
+                * HPO encoder, or we can return a dummy one that is a no-op for
+                * all functions.
+                */
+               return get_hpo_dp_link_hwss();
+       else if (can_use_dpia_link_hwss(link, link_res))
+               return get_dpia_link_hwss();
+       else if (can_use_dio_link_hwss(link, link_res))
+               return get_dio_link_hwss();
+       else
+               return get_virtual_link_hwss();
+}
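
A sketch of the intended call pattern for this getter; the setup_stream_encoder hook is assumed from the link_hwss interface here and used purely for illustration:

/* fetch the hwss fresh each time; the backing encoder can differ between
 * 8b/10b (DIO/DPIA) and 128b/132b (HPO) link settings
 */
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);

link_hwss->setup_stream_encoder(pipe_ctx); /* dispatches to the right backend */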
index 288e7b01f56174cbcb33932a3f5a8d6d5928a448..fcb021bf0c96ab1f87da8d6ffab8a82eaac9c066 100644 (file)
@@ -47,7 +47,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.167"
+#define DC_VER "3.2.172"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -195,13 +195,12 @@ struct dc_caps {
        unsigned int cursor_cache_size;
        struct dc_plane_cap planes[MAX_PLANES];
        struct dc_color_caps color;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool dp_hpo;
        bool hdmi_frl_pcon_support;
-#endif
        bool edp_dsc_support;
        bool vbios_lttpr_aware;
        bool vbios_lttpr_enable;
+       uint32_t max_otg_num;
 };
 
 struct dc_bug_wa {
@@ -306,7 +305,6 @@ struct dc_cap_funcs {
 
 struct link_training_settings;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 union allow_lttpr_non_transparent_mode {
        struct {
                bool DP1_4A : 1;
@@ -314,7 +312,7 @@ union allow_lttpr_non_transparent_mode {
        } bits;
        unsigned char raw;
 };
-#endif
+
 /* Structure to hold configuration flags set by dm at dc creation. */
 struct dc_config {
        bool gpu_vm_support;
@@ -322,16 +320,12 @@ struct dc_config {
        bool fbc_support;
        bool disable_fractional_pwm;
        bool allow_seamless_boot_optimization;
-       bool power_down_display_on_boot;
+       bool seamless_boot_edp_requested;
        bool edp_not_connected;
        bool edp_no_power_sequencing;
        bool force_enum_edp;
        bool forced_clocks;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode;
-#else
-       bool allow_lttpr_non_transparent_mode;
-#endif
        bool multi_mon_pp_mclk_switch;
        bool disable_dmcu;
        bool enable_4to1MPC;
@@ -396,6 +390,7 @@ enum dcn_pwr_state {
 enum dcn_zstate_support_state {
        DCN_ZSTATE_SUPPORT_UNKNOWN,
        DCN_ZSTATE_SUPPORT_ALLOW,
+       DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY,
        DCN_ZSTATE_SUPPORT_DISALLOW,
 };
 #endif
@@ -518,12 +513,13 @@ union root_clock_optimization_options {
 
 union dpia_debug_options {
        struct {
-               uint32_t disable_dpia:1;
-               uint32_t force_non_lttpr:1;
-               uint32_t extend_aux_rd_interval:1;
-               uint32_t disable_mst_dsc_work_around:1;
-               uint32_t hpd_delay_in_ms:12;
-               uint32_t reserved:16;
+               uint32_t disable_dpia:1; /* bit 0 */
+               uint32_t force_non_lttpr:1; /* bit 1 */
+               uint32_t extend_aux_rd_interval:1; /* bit 2 */
+               uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
+               uint32_t hpd_delay_in_ms:12; /* bits 4-15 */
+               uint32_t disable_force_tbt3_work_around:1; /* bit 16 */
+               uint32_t reserved:15;
        } bits;
        uint32_t raw;
 };
@@ -687,13 +683,12 @@ struct dc_debug_options {
        bool disable_dsc_edp;
        unsigned int  force_dsc_edp_policy;
        bool enable_dram_clock_change_one_display_vactive;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* TODO - remove once tested */
        bool legacy_dp2_lt;
        bool set_mst_en_for_sst;
        bool disable_uhbr;
        bool force_dp2_lt_fallback_method;
-#endif
+       bool ignore_cable_id;
        union mem_low_power_enable_options enable_mem_low_power;
        union root_clock_optimization_options root_clock_optimization;
        bool hpo_optimization;
@@ -709,6 +704,7 @@ struct dc_debug_options {
        int crb_alloc_policy_min_disp_count;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        bool disable_z10;
+       bool enable_z9_disable_interface;
        bool enable_sw_cntl_psr;
        union dpia_debug_options dpia_debug;
 #endif
@@ -1130,7 +1126,7 @@ struct dc_validation_set {
        uint8_t plane_count;
 };
 
-bool dc_validate_seamless_boot_timing(const struct dc *dc,
+bool dc_validate_boot_timing(const struct dc *dc,
                                const struct dc_sink *sink,
                                struct dc_crtc_timing *crtc_timing);
 
@@ -1211,6 +1207,7 @@ struct dpcd_caps {
        bool is_branch_dev;
        /* Dongle's downstream count. */
        union sink_count sink_count;
+       bool is_mst_capable;
        /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
        indicates 'Frame Sequential-to-Frame Pack' conversion capability. */
        struct dc_dongle_caps dongle_caps;
@@ -1236,12 +1233,11 @@ struct dpcd_caps {
        struct psr_caps psr_caps;
        struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
        union dp_main_line_channel_coding_cap channel_coding_cap;
        union dp_sink_video_fallback_formats fallback_formats;
        union dp_fec_capability1 fec_cap1;
-#endif
+       union dp_cable_attributes cable_attributes;
 };
 
 union dpcd_sink_ext_caps {
index 353dac420f34814c70d30f1fa6b88c619bc0309f..772084406795f65b044846f1eeed72ae281239af 100644 (file)
@@ -53,7 +53,6 @@ enum dc_link_rate {
        LINK_RATE_RBR2 = 0x0C,          // Rate_5 (RBR2)- 3.24 Gbps/Lane
        LINK_RATE_RATE_6 = 0x10,        // Rate_6               - 4.32 Gbps/Lane
        LINK_RATE_HIGH2 = 0x14,         // Rate_7 (HBR2)- 5.40 Gbps/Lane
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        LINK_RATE_HIGH3 = 0x1E,         // Rate_8 (HBR3)- 8.10 Gbps/Lane
        /* Starting from DP 2.0, the link rate enum directly represents the
         * actual link rate value in units of 10 Mbps
         */
        LINK_RATE_UHBR10 = 1000,        // UHBR10 - 10.0 Gbps/Lane
        LINK_RATE_UHBR13_5 = 1350,      // UHBR13.5 - 13.5 Gbps/Lane
        LINK_RATE_UHBR20 = 2000,        // UHBR20 - 20.0 Gbps/Lane
-#else
-       LINK_RATE_HIGH3 = 0x1E          // Rate_8 (HBR3)- 8.10 Gbps/Lane
-#endif
 };
 
 enum dc_link_spread {
@@ -100,7 +96,6 @@ enum dc_post_cursor2 {
        POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 enum dc_dp_ffe_preset_level {
        DP_FFE_PRESET_LEVEL0 = 0,
        DP_FFE_PRESET_LEVEL1,
@@ -120,7 +115,6 @@ enum dc_dp_ffe_preset_level {
        DP_FFE_PRESET_LEVEL15,
        DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15,
 };
-#endif
 
 enum dc_dp_training_pattern {
        DP_TRAINING_PATTERN_SEQUENCE_1 = 0,
@@ -128,19 +122,15 @@ enum dc_dp_training_pattern {
        DP_TRAINING_PATTERN_SEQUENCE_3,
        DP_TRAINING_PATTERN_SEQUENCE_4,
        DP_TRAINING_PATTERN_VIDEOIDLE,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DP_128b_132b_TPS1,
        DP_128b_132b_TPS2,
        DP_128b_132b_TPS2_CDS,
-#endif
 };
 
 enum dp_link_encoding {
        DP_UNKNOWN_ENCODING = 0,
        DP_8b_10b_ENCODING = 1,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DP_128b_132b_ENCODING = 2,
-#endif
 };
 
 struct dc_link_settings {
@@ -152,7 +142,6 @@ struct dc_link_settings {
        bool dpcd_source_device_specific_field_support;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 union dc_dp_ffe_preset {
        struct {
                uint8_t level           : 4;
@@ -163,24 +152,19 @@ union dc_dp_ffe_preset {
        } settings;
        uint8_t raw;
 };
-#endif
 
 struct dc_lane_settings {
        enum dc_voltage_swing VOLTAGE_SWING;
        enum dc_pre_emphasis PRE_EMPHASIS;
        enum dc_post_cursor2 POST_CURSOR2;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        union dc_dp_ffe_preset FFE_PRESET;
-#endif
 };
 
 struct dc_link_training_overrides {
        enum dc_voltage_swing *voltage_swing;
        enum dc_pre_emphasis *pre_emphasis;
        enum dc_post_cursor2 *post_cursor2;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        union dc_dp_ffe_preset *ffe_preset;
-#endif
 
        uint16_t *cr_pattern_time;
        uint16_t *eq_pattern_time;
@@ -194,7 +178,6 @@ struct dc_link_training_overrides {
        bool *fec_enable;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 union payload_table_update_status {
        struct {
                uint8_t  VC_PAYLOAD_TABLE_UPDATED:1;
@@ -202,7 +185,6 @@ union payload_table_update_status {
        } bits;
        uint8_t  raw;
 };
-#endif
 
 union dpcd_rev {
        struct {
@@ -291,14 +273,10 @@ union lane_align_status_updated {
        struct {
                uint8_t INTERLANE_ALIGN_DONE:1;
                uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1;
                uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1;
                uint8_t LT_FAILED_128b_132b:1;
                uint8_t RESERVED:1;
-#else
-               uint8_t RESERVED:4;
-#endif
                uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
                uint8_t LINK_STATUS_UPDATED:1;
        } bits;
@@ -311,12 +289,10 @@ union lane_adjust {
                uint8_t PRE_EMPHASIS_LANE:2;
                uint8_t RESERVED:4;
        } bits;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct {
                uint8_t PRESET_VALUE    :4;
                uint8_t RESERVED        :4;
        } tx_ffe;
-#endif
        uint8_t raw;
 };
 
@@ -346,12 +322,10 @@ union dpcd_training_lane {
                uint8_t MAX_PRE_EMPHASIS_REACHED:1;
                uint8_t RESERVED:2;
        } bits;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct {
                uint8_t PRESET_VALUE    :4;
                uint8_t RESERVED        :4;
        } tx_ffe;
-#endif
        uint8_t raw;
 };
 
@@ -665,18 +639,9 @@ union test_response {
 
 union phy_test_pattern {
        struct {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                /* This field is 7 bits for DP2.0 */
                uint8_t PATTERN     :7;
                uint8_t RESERVED    :1;
-#else
-               /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
-                * and 3 bits for DP1.2.
-                */
-               uint8_t PATTERN     :3;
-                * By spec, bits 7:2 are 0 for DP1.1. */
-               uint8_t RESERVED    :5;
-#endif
        } bits;
        uint8_t raw;
 };
@@ -754,14 +719,10 @@ union dpcd_fec_capability {
                uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
                uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
                uint8_t BIT_ERROR_COUNT_CAPABLE:1;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1;
                uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1;
                uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1;
                uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1;
-#else
-               uint8_t RESERVED:4;
-#endif
        } bits;
        uint8_t raw;
 };
@@ -925,7 +886,6 @@ struct dpcd_usb4_dp_tunneling_info {
        uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #ifndef DP_MAIN_LINK_CHANNEL_CODING_CAP
 #define DP_MAIN_LINK_CHANNEL_CODING_CAP                        0x006
 #endif
@@ -941,6 +901,9 @@ struct dpcd_usb4_dp_tunneling_info {
 #ifndef DP_LINK_SQUARE_PATTERN
 #define DP_LINK_SQUARE_PATTERN                         0x10F
 #endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX            0x110
+#endif
 #ifndef DP_DSC_CONFIGURATION
 #define DP_DSC_CONFIGURATION                           0x161
 #endif
@@ -953,6 +916,9 @@ struct dpcd_usb4_dp_tunneling_info {
 #ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
 #define DP_128b_132b_TRAINING_AUX_RD_INTERVAL          0x2216
 #endif
+#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
+#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX            0x2217
+#endif
 #ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
 #define DP_TEST_264BIT_CUSTOM_PATTERN_7_0              0X2230
 #endif
@@ -994,8 +960,8 @@ struct dpcd_usb4_dp_tunneling_info {
 #endif
 #ifndef DP_INTRA_HOP_AUX_REPLY_INDICATION
 #define DP_INTRA_HOP_AUX_REPLY_INDICATION              (1 << 3)
-#endif
 /* TODO - Use DRM header to replace above once available */
+#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION
 
 union dp_main_line_channel_coding_cap {
        struct {
@@ -1052,6 +1018,16 @@ union dp_fec_capability1 {
        uint8_t raw;
 };
 
+union dp_cable_attributes {
+       struct {
+               uint8_t UHBR10_20_CAPABILITY    :2;
+               uint8_t UHBR13_5_CAPABILITY     :1;
+               uint8_t CABLE_TYPE              :3;
+               uint8_t RESERVED                :2;
+       } bits;
+       uint8_t raw;
+};
+
 struct dp_color_depth_caps {
        uint8_t support_6bpc    :1;
        uint8_t support_8bpc    :1;
@@ -1091,6 +1067,5 @@ union dp_128b_132b_training_aux_rd_interval {
        } bits;
        uint8_t raw;
 };
-#endif
 
 #endif /* DC_DP_TYPES_H */
index ab6bc5d79012559c032dac38ae8d680714c7c9d8..f43cce16bb6ce78288e64efc5a835988637bcab3 100644 (file)
@@ -588,6 +588,66 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
        return reg_val;
 }
 
+
+uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
+               uint32_t index, uint32_t reg_val, int n,
+               uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+               ...)
+{
+       uint32_t shift, mask, field_value;
+       int i = 1;
+
+       va_list ap;
+
+       va_start(ap, field_value1);
+
+       reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+       while (i < n) {
+               shift = va_arg(ap, uint32_t);
+               mask = va_arg(ap, uint32_t);
+               field_value = va_arg(ap, uint32_t);
+
+               reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+               i++;
+       }
+
+       dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
+       va_end(ap);
+
+       return reg_val;
+}
+
+uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
+               uint32_t index, int n,
+               uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+               ...)
+{
+       uint32_t shift, mask, *field_value;
+       uint32_t value = 0;
+       int i = 1;
+
+       va_list ap;
+
+       va_start(ap, field_value1);
+
+       value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
+       *field_value1 = get_reg_field_value_ex(value, mask1, shift1);
+
+       while (i < n) {
+               shift = va_arg(ap, uint32_t);
+               mask = va_arg(ap, uint32_t);
+               field_value = va_arg(ap, uint32_t *);
+
+               *field_value = get_reg_field_value_ex(value, mask, shift);
+               i++;
+       }
+
+       va_end(ap);
+
+       return value;
+}
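
A hypothetical call site for the two new _sync helpers; INDEX and the FIELD_A_*/FIELD_B_* shift/mask macros below are placeholders, not real register definitions. Note that n counts the shift/mask/value triplets, including the first:

uint32_t a, b;

/* read two fields of one indexed PCIE register in a single access */
uint32_t reg = generic_indirect_reg_get_sync(ctx, INDEX, 2,
		FIELD_A_SHIFT, FIELD_A_MASK, &a,
		FIELD_B_SHIFT, FIELD_B_MASK, &b);

/* modify both fields and write the register back in one shot */
reg = generic_indirect_reg_update_ex_sync(ctx, INDEX, reg, 2,
		FIELD_A_SHIFT, FIELD_A_MASK, 1,
		FIELD_B_SHIFT, FIELD_B_MASK, 0);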
+
 void reg_sequence_start_gather(const struct dc_context *ctx)
 {
        /* if reg sequence is supported and enabled, set flag to
index c0e37ad0e26ce90662fb8c33048c1febff4621ff..9ad3ee4079c31a3f60b0529dc1aa01213ebcc80e 100644 (file)
@@ -43,14 +43,16 @@ struct dc_link_status {
        struct dpcd_caps *dpcd_caps;
 };
 
+struct dp_receiver_status {
+       bool cable_id_updated;
+};
+
 /* DP MST stream allocation (payload bandwidth number) */
 struct link_mst_stream_allocation {
        /* DIG front */
        const struct stream_encoder *stream_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* HPO DP Stream Encoder */
        const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
-#endif
        /* associate DRM payload table with DC stream encoder */
        uint8_t vcp_id;
        /* number of slots required for the DP stream in transport packet */
@@ -197,10 +199,13 @@ struct dc_link {
                bool dp_mot_reset_segment;
                /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
                bool dpia_mst_dsc_always_on;
+               /* Forced DPIA into TBT3 compatibility mode. */
+               bool dpia_forced_tbt3_mode;
        } wa_flags;
        struct link_mst_stream_allocation_table mst_stream_alloc_table;
 
        struct dc_link_status link_status;
+       struct dp_receiver_status dprx_status;
 
        struct link_trace link_trace;
        struct gpio *hpd_gpio;
@@ -307,6 +312,7 @@ void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
  */
 enum dc_detect_reason {
        DETECT_REASON_BOOT,
+       DETECT_REASON_RESUMEFROMS3S4,
        DETECT_REASON_HPD,
        DETECT_REASON_HPDRX,
        DETECT_REASON_FALLBACK,
@@ -316,10 +322,8 @@ enum dc_detect_reason {
 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
 bool dc_link_get_hpd_state(struct dc_link *dc_link);
 enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
 enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-#endif
 
 /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
  * Return:
@@ -453,14 +457,17 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
 bool dc_link_is_fec_supported(const struct dc_link *link);
 bool dc_link_should_enable_fec(const struct dc_link *link);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
 enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
-#endif
 
-const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link);
+void dc_link_get_cur_link_res(const struct dc_link *link,
+               struct link_resource *link_res);
 /* take a snapshot of current link resource allocation state */
 void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
 /* restore link resource allocation state from a snapshot */
 void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
+void dc_link_dp_clear_rx_status(struct dc_link *link);
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+               struct graphics_object_id link_id,
+               struct gpio_service *gpio_service);
 #endif /* DC_LINK_H_ */
index e37c4a10bfd5e5ca7a4b66c5f15ae0d9f77e4811..f631b61abedd416806313118c3851f97b21dd2e5 100644 (file)
@@ -115,12 +115,10 @@ struct periodic_interrupt_config {
        int lines_offset;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 struct dc_mst_stream_bw_update {
        bool is_increase; // true if bandwidth is increased, false if reduced
        uint32_t mst_stream_bw; // new mst bandwidth in kbps
 };
-#endif
 
 union stream_update_flags {
        struct {
@@ -132,9 +130,7 @@ union stream_update_flags {
                uint32_t gamut_remap:1;
                uint32_t wb_update:1;
                uint32_t dsc_changed : 1;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                uint32_t mst_bw : 1;
-#endif
        } bits;
 
        uint32_t raw;
@@ -288,9 +284,7 @@ struct dc_stream_update {
 
        struct dc_writeback_update *wb_update;
        struct dc_dsc_config *dsc_config;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dc_mst_stream_bw_update *mst_bw_update;
-#endif
        struct dc_transfer_func *func_shaper;
        struct dc_3dlut *lut3d_func;
 
index 0285a4b38d0565940c03d1e0b88b692773f94b47..48859d5fc17256873b24b54d2e01b155bb1f2595 100644 (file)
@@ -395,14 +395,11 @@ struct dc_lttpr_caps {
        uint8_t max_link_rate;
        uint8_t phy_repeater_cnt;
        uint8_t max_ext_timeout;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
        union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
-#endif
        uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 struct dc_dongle_dfp_cap_ext {
        bool supported;
        uint16_t max_pixel_rate_in_mps;
@@ -414,7 +411,6 @@ struct dc_dongle_dfp_cap_ext {
        struct dp_color_depth_caps ycbcr422_color_depth_caps;
        struct dp_color_depth_caps ycbcr420_color_depth_caps;
 };
-#endif
 
 struct dc_dongle_caps {
        /* dongle type (DP converter, CV smart dongle) */
@@ -429,10 +425,8 @@ struct dc_dongle_caps {
        bool is_dp_hdmi_ycbcr420_converter;
        uint32_t dp_hdmi_max_bpc;
        uint32_t dp_hdmi_max_pixel_clk_in_khz;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
        struct dc_dongle_dfp_cap_ext dfp_cap_ext;
-#endif
 };
 /* Scaling format */
 enum scaling_transformation {
index 6d42a9cc9916279a519782d0ae482b5682789cbd..74b05b3aef08749a7ccaeeacc149d3199ef29e71 100644 (file)
@@ -878,7 +878,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                        default:
                                DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
                                                        LOG_FLAG_Error_I2cAux,
-                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case.");
+                                                       "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: FAILURE: AUX_TRANSACTION_REPLY_* unknown, default case. Reply: %d", *payload->reply);
                                goto fail;
                        }
                        break;
index f1c61d5aee6c6f68adfbf8bb312357493fb9cc9d..0bc41414481eddde9f980ddf7b0ca838f79434d6 100644 (file)
@@ -1325,7 +1325,8 @@ void dce110_link_encoder_disable_output(
 
 void dce110_link_encoder_dp_set_lane_settings(
        struct link_encoder *enc,
-       const struct link_training_settings *link_settings)
+       const struct dc_link_settings *link_settings,
+       const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
 {
        struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
        union dpcd_training_lane_set training_lane_set = { { 0 } };
@@ -1340,26 +1341,26 @@ void dce110_link_encoder_dp_set_lane_settings(
        cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
        cntl.transmitter = enc110->base.transmitter;
        cntl.connector_obj_id = enc110->base.connector;
-       cntl.lanes_number = link_settings->link_settings.lane_count;
+       cntl.lanes_number = link_settings->lane_count;
        cntl.hpd_sel = enc110->base.hpd_source;
-       cntl.pixel_clock = link_settings->link_settings.link_rate *
+       cntl.pixel_clock = link_settings->link_rate *
                                                LINK_RATE_REF_FREQ_IN_KHZ;
 
-       for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
+       for (lane = 0; lane < link_settings->lane_count; lane++) {
                /* translate lane settings */
 
                training_lane_set.bits.VOLTAGE_SWING_SET =
-                       link_settings->lane_settings[lane].VOLTAGE_SWING;
+                               lane_settings[lane].VOLTAGE_SWING;
                training_lane_set.bits.PRE_EMPHASIS_SET =
-                       link_settings->lane_settings[lane].PRE_EMPHASIS;
+                               lane_settings[lane].PRE_EMPHASIS;
 
                /* post cursor 2 setting only applies to HBR2 link rate */
-               if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+               if (link_settings->link_rate == LINK_RATE_HIGH2) {
                        /* this is passed to VBIOS
                         * to program post cursor 2 level */
 
                        training_lane_set.bits.POST_CURSOR2_SET =
-                               link_settings->lane_settings[lane].POST_CURSOR2;
+                                       lane_settings[lane].POST_CURSOR2;
                }
 
                cntl.lane_select = lane;
index fc6ade824c231af0769a6421a0edb189545d9f00..261c70e01e331386fc466c88cf8a04681188fc60 100644 (file)
@@ -279,7 +279,8 @@ void dce110_link_encoder_disable_output(
 /* set DP lane settings */
 void dce110_link_encoder_dp_set_lane_settings(
        struct link_encoder *enc,
-       const struct link_training_settings *link_settings);
+       const struct dc_link_settings *link_settings,
+       const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
 
 void dce110_link_encoder_dp_set_phy_pattern(
        struct link_encoder *enc,
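
Both the implementation and the prototype drop the aggregate struct link_training_settings in favor of its two constituents, keeping training-sequence state out of the encoder interface. A hedged caller sketch; the hw_lane_settings field name is an assumption about the training-settings layout, while the funcs hook matches the prototype above:

	/* Sketch: how a DP link-training caller adapts to the split
	 * signature -- unpack the aggregate struct before reaching the
	 * encoder. */
	static void apply_lane_settings(struct link_encoder *enc,
					const struct link_training_settings *lt)
	{
		enc->funcs->dp_set_lane_settings(enc,
				&lt->link_settings,    /* lane count + link rate */
				lt->hw_lane_settings); /* per-lane swing/pre-emphasis */
	}
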
index 87ed48d5530dc7b51507350d26d0f8f2bc4efa40..8bd265b40847076999456edf6c504e39f141bb0e 100644 (file)
@@ -138,6 +138,10 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
                cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
                break;
        }
+
+       if (cmd.psr_set_version.psr_set_version_data.version == PSR_VERSION_UNSUPPORTED)
+               return false;
+
        cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
        cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
        cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
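
The new early return turns an unsupported PSR version into a hard failure before any DMUB command is sent. A hedged caller-side sketch; the call site shown here is illustrative, not a quote of upstream code:

	/* Sketch: with the guard above, callers can gate the rest of PSR
	 * setup on the return value instead of re-checking the version. */
	if (!dmub_psr_set_version(dmub, stream, panel_inst)) {
		/* PSR_VERSION_UNSUPPORTED: leave PSR disabled for this panel */
		return false;
	}
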
index f3ff141b706a4d16a0de765e239419bfb434f602..ace04e2ed34ea6c39c2363ea2413557d2d626649 100644 (file)
@@ -49,9 +49,7 @@
 #include "link_enc_cfg.h"
 #include "link_hwss.h"
 #include "dc_link_dp.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #include "dccg.h"
-#endif
 #include "clock_source.h"
 #include "clk_mgr.h"
 #include "abm.h"
@@ -669,17 +667,12 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
        struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
        struct dc_link *link = pipe_ctx->stream->link;
        const struct dc *dc = link->dc;
-
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
        uint32_t active_total_with_borders;
        uint32_t early_control = 0;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
 
-       /* For MST, there are multiply stream go to only one link.
-        * connect DIG back_end to front_end while enable_stream and
-        * disconnect them during disable_stream
-        * BY this, it is logic clean to separate stream and link */
-       link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
-                                                   pipe_ctx->stream_res.stream_enc->id, true);
+       link_hwss->setup_stream_encoder(pipe_ctx);
 
        dc->hwss.update_info_frame(pipe_ctx);
 
@@ -795,7 +788,7 @@ void dce110_edp_wait_for_hpd_ready(
        dal_gpio_destroy_irq(&hpd);
 
        if (false == edp_hpd_high) {
-               DC_LOG_ERROR(
+               DC_LOG_WARNING(
                                "%s: wait timed out!\n", __func__);
        }
 }
@@ -1112,17 +1105,12 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
                        clk_mgr->funcs->enable_pme_wa(clk_mgr);
                /* un-mute audio */
                /* TODO: audio should be per stream rather than per link */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                if (is_dp_128b_132b_signal(pipe_ctx))
                        pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
                                        pipe_ctx->stream_res.hpo_dp_stream_enc, false);
                else
                        pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                                        pipe_ctx->stream_res.stream_enc, false);
-#else
-               pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                               pipe_ctx->stream_res.stream_enc, false);
-#endif
                if (pipe_ctx->stream_res.audio)
                        pipe_ctx->stream_res.audio->enabled = true;
        }
@@ -1145,32 +1133,22 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
        if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
                return;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx))
                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
                                pipe_ctx->stream_res.hpo_dp_stream_enc, true);
        else
                pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                                pipe_ctx->stream_res.stream_enc, true);
-#else
-       pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
-                       pipe_ctx->stream_res.stream_enc, true);
-#endif
        if (pipe_ctx->stream_res.audio) {
                pipe_ctx->stream_res.audio->enabled = false;
 
                if (dc_is_dp_signal(pipe_ctx->stream->signal))
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (is_dp_128b_132b_signal(pipe_ctx))
                                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
                                                pipe_ctx->stream_res.hpo_dp_stream_enc);
                        else
                                pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
                                                pipe_ctx->stream_res.stream_enc);
-#else
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
-                                       pipe_ctx->stream_res.stream_enc);
-#endif
                else
                        pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
                                        pipe_ctx->stream_res.stream_enc);
@@ -1195,7 +1173,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->link;
        struct dc *dc = pipe_ctx->stream->ctx->dc;
-       struct link_encoder *link_enc = NULL;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
 
        if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
                pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1204,54 +1182,26 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->stream_res.stream_enc);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx)) {
                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
                                        pipe_ctx->stream_res.hpo_dp_stream_enc);
        } else if (dc_is_dp_signal(pipe_ctx->stream->signal))
-#else
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-#endif
                pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
                        pipe_ctx->stream_res.stream_enc);
 
        dc->hwss.disable_audio_stream(pipe_ctx);
 
-       /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
-       if (link->ep_type == DISPLAY_ENDPOINT_PHY)
-               link_enc = link->link_enc;
-       else if (dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-       ASSERT(link_enc);
+       link_hwss->reset_stream_encoder(pipe_ctx);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx)) {
-               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable(
-                               pipe_ctx->stream_res.hpo_dp_stream_enc);
-               setup_dp_hpo_stream(pipe_ctx, false);
-       /* TODO - DP2.0 HW: unmap stream from link encoder here */
-       } else {
-               if (link_enc)
-                       link_enc->funcs->connect_dig_be_to_fe(
-                               link_enc,
-                               pipe_ctx->stream_res.stream_enc->id,
-                               false);
-       }
-#else
-       if (link_enc)
-               link_enc->funcs->connect_dig_be_to_fe(
-                       link->link_enc,
-                       pipe_ctx->stream_res.stream_enc->id,
-                       false);
-#endif
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx))
-               dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false);
-#endif
-
+               /* TODO: This looks like a bug to me as we are disabling HPO IO when
+                * we are just disabling a single HPO stream. Shouldn't we disable HPO
+                * HW control only when HPOs for all streams are disabled?
+                */
+               if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
+                       pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
+                                       pipe_ctx->stream->ctx->dc->hwseq, false);
+       }
 }
 
 void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
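
dce110_enable_stream()/dce110_disable_stream() now route through a per-transport link_hwss object instead of open-coding the DIG back-end/front-end wiring and its HPO counterpart. A minimal sketch of the indirection, assuming only the two hooks this hunk uses; the DIO body restates the code removed above:

	/* Sketch of the vtable returned by get_link_hwss(): one
	 * implementation per transport (legacy DIO, HPO DP 128b/132b, ...).
	 * Only the hooks used in this hunk are shown. */
	struct link_hwss_sketch {
		void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
		void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
	};

	/* DIO variant: for MST, multiple streams share one link, so the DIG
	 * back end is connected to the front end on enable and disconnected
	 * on disable -- the pairing the removed open-coded block performed. */
	static void dio_setup_stream_encoder(struct pipe_ctx *pipe_ctx)
	{
		struct dc_link *link = pipe_ctx->stream->link;

		link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
				pipe_ctx->stream_res.stream_enc->id, true);
	}
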
@@ -1285,15 +1235,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
                link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx)) {
                /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
                                pipe_ctx->stream_res.hpo_dp_stream_enc);
        } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
-#else
-       if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
-#endif
                pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
 
                if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) {
@@ -1535,7 +1481,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
                build_audio_output(context, pipe_ctx, &audio_output);
 
                if (dc_is_dp_signal(pipe_ctx->stream->signal))
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                        if (is_dp_128b_132b_signal(pipe_ctx))
                                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
                                                pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -1546,12 +1491,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
                                                pipe_ctx->stream_res.stream_enc,
                                                pipe_ctx->stream_res.audio->inst,
                                                &pipe_ctx->stream->audio_info);
-#else
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
-                                       pipe_ctx->stream_res.stream_enc,
-                                       pipe_ctx->stream_res.audio->inst,
-                                       &pipe_ctx->stream->audio_info);
-#endif
                else
                        pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
                                        pipe_ctx->stream_res.stream_enc,
@@ -1570,14 +1509,12 @@ static enum dc_status apply_single_controller_ctx_to_hw(
        if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic)
                check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* DCN3.1 FPGA Workaround
         * Need to enable HPO DP Stream Encoder before setting OTG master enable.
         * To do so, move calling function enable_stream_timing to only be done AFTER calling
         * function core_link_enable_stream
         */
        if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
-#endif
                /*  */
                /* Do not touch stream timing on seamless boot optimization. */
                if (!pipe_ctx->stream->apply_seamless_boot_optimization)
@@ -1608,11 +1545,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
                        pipe_ctx->stream_res.stream_enc,
                        pipe_ctx->stream_res.tg->inst);
 
-       if (dc_is_embedded_signal(pipe_ctx->stream->signal) &&
-               pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
-               pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
-                       pipe_ctx->stream_res.stream_enc);
-
        if (dc_is_dp_signal(pipe_ctx->stream->signal))
                dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
 
@@ -1643,7 +1575,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
        if (!stream->dpms_off)
                core_link_enable_stream(context, pipe_ctx);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* DCN3.1 FPGA Workaround
         * Need to enable HPO DP Stream Encoder before setting OTG master enable.
         * To do so, move calling function enable_stream_timing to only be done AFTER calling
@@ -1653,7 +1584,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
                if (!pipe_ctx->stream->apply_seamless_boot_optimization)
                        hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
        }
-#endif
 
        pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
 
@@ -1831,7 +1761,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                            edp_link->link_status.link_active) {
                                struct dc_stream_state *edp_stream = edp_streams[0];
 
-                               can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
+                               can_apply_edp_fast_boot = dc_validate_boot_timing(dc,
+                                       edp_stream->sink, &edp_stream->timing);
                                edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
                                if (can_apply_edp_fast_boot)
                                        DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
@@ -1839,9 +1770,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                                break;
                        }
                }
-               // We are trying to enable eDP, don't power down VDD
-               if (can_apply_edp_fast_boot)
+
+               /*
+                * TO-DO: So far the logic below only addresses the single-eDP case.
+                * For the dual-eDP case, a few things need to be implemented first:
+                *
+                * 1. Change the fastboot logic above so that the streams of both
+                * eDP links (link[0] and link[1]) are all checked.
+                *
+                * 2. Change keep_edp_vdd_on to an array and maintain a
+                * keep_edp_vdd_on entry for each eDP.
+                *
+                * Once the above two items are completed, the logic below can be
+                * changed accordingly so the dual-eDP case is fully covered.
+                */
+
+               // We are trying to enable eDP; don't power down VDD if an eDP stream already exists
+               if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
                        keep_edp_vdd_on = true;
+                       DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+               } else {
+                       DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+               }
        }
 
        // Check seamless boot support
@@ -2233,8 +2184,6 @@ static void dce110_setup_audio_dto(
 
                        build_audio_output(context, pipe_ctx, &audio_output);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-                       /* For DCN3.1, audio to HPO FRL encoder is using audio DTBCLK DTO */
                        if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) {
                                /* disable audio DTBCLK DTO */
                                dc->res_pool->dccg->funcs->set_audio_dtbclk_dto(
@@ -2251,13 +2200,6 @@ static void dce110_setup_audio_dto(
                                        pipe_ctx->stream->signal,
                                        &audio_output.crtc_info,
                                        &audio_output.pll_info);
-#else
-                       pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
-                               pipe_ctx->stream_res.audio,
-                               pipe_ctx->stream->signal,
-                               &audio_output.crtc_info,
-                               &audio_output.pll_info);
-#endif
                        break;
                }
        }
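
Dropping the #else branch makes the audio clock split unconditional: HPO (128b/132b) audio is clocked from the DTBCLK DTO, everything else from the audio wall-clock DTO, so the DTBCLK DTO is parked before the wall DTO is programmed. A condensed sketch of the sequence; treating 0 as "disable" for set_audio_dtbclk_dto() is an assumption, since the hunk elides its arguments:

	/* Sketch: park the HPO audio clock source, then program the classic
	 * wall-clock DTO for this pipe. */
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto)
		dc->res_pool->dccg->funcs->set_audio_dtbclk_dto(dc->res_pool->dccg, 0);

	pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
			pipe_ctx->stream_res.audio,
			pipe_ctx->stream->signal,
			&audio_output.crtc_info,
			&audio_output.pll_info);
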
index 530a72e3eefe29b9d767c8784e0b6b0b864d89d4..915eecb4078838dcecee1dcf63145e10a7bcbbd5 100644 (file)
@@ -1505,8 +1505,7 @@ void dcn10_init_hw(struct dc *dc)
                dmub_enable_outbox_notification(dc);
 
        /* we want to turn off all dp displays before doing detection */
-       if (dc->config.power_down_display_on_boot)
-               dc_link_blank_all_dp_displays(dc);
+       dc_link_blank_all_dp_displays(dc);
 
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
@@ -1514,7 +1513,7 @@ void dcn10_init_hw(struct dc *dc)
         * Otherwise, if taking control is not possible, we need to power
         * everything down.
         */
-       if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+       if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
                if (!is_optimized_init_done) {
                        hws->funcs.init_pipes(dc, dc->current_state);
                        if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
index f4b34c110eae3dac6fb06fe34ab61df227bb5d37..ca39361f71c8901dbf8501957008acba56e2f3c4 100644 (file)
@@ -1101,7 +1101,8 @@ void dcn10_link_encoder_disable_output(
 
 void dcn10_link_encoder_dp_set_lane_settings(
        struct link_encoder *enc,
-       const struct link_training_settings *link_settings)
+       const struct dc_link_settings *link_settings,
+       const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
 {
        struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
        union dpcd_training_lane_set training_lane_set = { { 0 } };
@@ -1116,26 +1117,25 @@ void dcn10_link_encoder_dp_set_lane_settings(
        cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
        cntl.transmitter = enc10->base.transmitter;
        cntl.connector_obj_id = enc10->base.connector;
-       cntl.lanes_number = link_settings->link_settings.lane_count;
+       cntl.lanes_number = link_settings->lane_count;
        cntl.hpd_sel = enc10->base.hpd_source;
-       cntl.pixel_clock = link_settings->link_settings.link_rate *
-                                               LINK_RATE_REF_FREQ_IN_KHZ;
+       cntl.pixel_clock = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
 
-       for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
+       for (lane = 0; lane < link_settings->lane_count; lane++) {
                /* translate lane settings */
 
                training_lane_set.bits.VOLTAGE_SWING_SET =
-                       link_settings->lane_settings[lane].VOLTAGE_SWING;
+                               lane_settings[lane].VOLTAGE_SWING;
                training_lane_set.bits.PRE_EMPHASIS_SET =
-                       link_settings->lane_settings[lane].PRE_EMPHASIS;
+                               lane_settings[lane].PRE_EMPHASIS;
 
                /* post cursor 2 setting only applies to HBR2 link rate */
-               if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+               if (link_settings->link_rate == LINK_RATE_HIGH2) {
                        /* this is passed to VBIOS
                         * to program post cursor 2 level
                         */
                        training_lane_set.bits.POST_CURSOR2_SET =
-                               link_settings->lane_settings[lane].POST_CURSOR2;
+                                       lane_settings[lane].POST_CURSOR2;
                }
 
                cntl.lane_select = lane;
index c337588231ff0a485ded6951fed6862653b073d6..663aac0a164a3a40b960c24703e8d3f637562eb8 100644 (file)
@@ -581,7 +581,8 @@ void dcn10_link_encoder_disable_output(
 /* set DP lane settings */
 void dcn10_link_encoder_dp_set_lane_settings(
        struct link_encoder *enc,
-       const struct link_training_settings *link_settings);
+       const struct dc_link_settings *link_settings,
+       const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
 
 void dcn10_link_encoder_dp_set_phy_pattern(
        struct link_encoder *enc,
index bf4436d7aaab95e8c11c18fa9ca0cd817fa23633..b0c08ee6bc2cb316663bde8e5265a36a479785c9 100644 (file)
@@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets(
 
 }
 
-void enc1_stream_encoder_reset_fifo(
-       struct stream_encoder *enc)
-{
-       struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-
-       /* set DIG_START to 0x1 to reset FIFO */
-       REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
-       udelay(100);
-
-       /* write 0 to take the FIFO out of reset */
-       REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
-}
-
 void enc1_stream_encoder_dp_blank(
        struct dc_link *link,
        struct stream_encoder *enc)
@@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
                enc1_stream_encoder_send_immediate_sdp_message,
        .stop_dp_info_packets =
                enc1_stream_encoder_stop_dp_info_packets,
-       .reset_fifo =
-               enc1_stream_encoder_reset_fifo,
        .dp_blank =
                enc1_stream_encoder_dp_blank,
        .dp_unblank =
index a146a41f68e9ef786bbac83da31447d646043060..687d7e4bf7cadd2129b458aaabedaa4d70fbcaa7 100644 (file)
@@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
 void enc1_stream_encoder_stop_dp_info_packets(
        struct stream_encoder *enc);
 
-void enc1_stream_encoder_reset_fifo(
-       struct stream_encoder *enc);
-
 void enc1_stream_encoder_dp_blank(
        struct dc_link *link,
        struct stream_encoder *enc);
index f98aba308028baacb558f0561f21980e4ea8c24f..b3c9a9724efdd972ff46649fdb67923a27ed7060 100644 (file)
        type SYMCLK32_ROOT_SE1_GATE_DISABLE;\
        type SYMCLK32_ROOT_SE2_GATE_DISABLE;\
        type SYMCLK32_ROOT_SE3_GATE_DISABLE;\
+       type SYMCLK32_SE0_GATE_DISABLE;\
+       type SYMCLK32_SE1_GATE_DISABLE;\
+       type SYMCLK32_SE2_GATE_DISABLE;\
+       type SYMCLK32_SE3_GATE_DISABLE;\
        type SYMCLK32_ROOT_LE0_GATE_DISABLE;\
        type SYMCLK32_ROOT_LE1_GATE_DISABLE;\
+       type SYMCLK32_LE0_GATE_DISABLE;\
+       type SYMCLK32_LE1_GATE_DISABLE;\
        type DPSTREAMCLK_ROOT_GATE_DISABLE;\
        type DPSTREAMCLK_GATE_DISABLE;\
        type HDMISTREAMCLK0_DTO_PHASE;\
        type HDMISTREAMCLK0_DTO_MODULO;\
        type HDMICHARCLK0_GATE_DISABLE;\
-       type HDMICHARCLK0_ROOT_GATE_DISABLE;
-
+       type HDMICHARCLK0_ROOT_GATE_DISABLE; \
+       type PHYASYMCLK_GATE_DISABLE; \
+       type PHYBSYMCLK_GATE_DISABLE; \
+       type PHYCSYMCLK_GATE_DISABLE; \
+       type PHYDSYMCLK_GATE_DISABLE; \
+       type PHYESYMCLK_GATE_DISABLE;
 
 struct dccg_shift {
        DCCG_REG_FIELD_LIST(uint8_t)
@@ -233,6 +243,7 @@ struct dccg_registers {
        uint32_t DSCCLK2_DTO_PARAM;
        uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
        uint32_t DPSTREAMCLK_GATE_DISABLE;
+       uint32_t DCCG_GATE_DISABLE_CNTL2;
        uint32_t DCCG_GATE_DISABLE_CNTL3;
        uint32_t HDMISTREAMCLK0_DTO_PARAM;
        uint32_t DCCG_GATE_DISABLE_CNTL4;
index 4991e93e5308c5175f7d1aabd272501688b24bb2..20a9cbb7c0a8177ff1fb75b442c055a308dac6b9 100644 (file)
@@ -55,6 +55,7 @@
 #include "inc/link_dpcd.h"
 #include "dpcd_defs.h"
 #include "inc/link_enc_cfg.h"
+#include "link_hwss.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -2390,46 +2391,22 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
        uint32_t active_total_with_borders;
        uint32_t early_control = 0;
        struct timing_generator *tg = pipe_ctx->stream_res.tg;
-       struct link_encoder *link_enc;
+       const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
 
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign)
-               link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
-       else
-               link_enc = link->link_enc;
-       ASSERT(link_enc);
-
-       /* For MST, there are multiply stream go to only one link.
-        * connect DIG back_end to front_end while enable_stream and
-        * disconnect them during disable_stream
-        * BY this, it is logic clean to separate stream and link
-        */
        if (is_dp_128b_132b_signal(pipe_ctx)) {
-               if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control)
-                       pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control(
-                               pipe_ctx->stream->ctx->dc->hwseq, true);
-               setup_dp_hpo_stream(pipe_ctx, true);
-               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
-                               pipe_ctx->stream_res.hpo_dp_stream_enc);
-               pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
-                               pipe_ctx->stream_res.hpo_dp_stream_enc,
-                               pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
-                               pipe_ctx->link_res.hpo_dp_link_enc->inst);
+               if (dc->hwseq->funcs.setup_hpo_hw_control)
+                       dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
        }
 
-       if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
-               link_enc->funcs->connect_dig_be_to_fe(
-                       link_enc, pipe_ctx->stream_res.stream_enc->id, true);
-
-       if (dc_is_dp_signal(pipe_ctx->stream->signal))
-               dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+       link_hwss->setup_stream_encoder(pipe_ctx);
 
        if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
-               if (link->dc->hwss.program_dmdata_engine)
-                       link->dc->hwss.program_dmdata_engine(pipe_ctx);
+               if (dc->hwss.program_dmdata_engine)
+                       dc->hwss.program_dmdata_engine(pipe_ctx);
        }
 
-       link->dc->hwss.update_info_frame(pipe_ctx);
+       dc->hwss.update_info_frame(pipe_ctx);
 
        if (dc_is_dp_signal(pipe_ctx->stream->signal))
                dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
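
The same link_hwss hook absorbs the HPO-specific enable path here; only the global HPO HW-control enable stays in the hw-sequencer. A sketch of where the deleted calls plausibly moved, restated inside a DP2-variant hook; the function name and its placement are assumptions, the calls themselves are rebuilt from the lines removed above:

	/* Sketch: DP 128b/132b variant of setup_stream_encoder -- enable the
	 * HPO stream encoder and map it onto its HPO link encoder. */
	static void hpo_dp_setup_stream_encoder(struct pipe_ctx *pipe_ctx)
	{
		struct hpo_dp_stream_encoder *stream_enc =
				pipe_ctx->stream_res.hpo_dp_stream_enc;

		setup_dp_hpo_stream(pipe_ctx, true);
		stream_enc->funcs->enable_stream(stream_enc);
		stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst,
				pipe_ctx->link_res.hpo_dp_link_enc->inst);
	}
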
index 2bc93df023ad26d54ac9e211ff749a1e2a5a3eec..dfe2e1c25a26e8820dcb36ce1fa4c898752cea13 100644 (file)
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .timing_trace = false,
                .clock_trace = true,
                .disable_pplib_clock_request = true,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
@@ -1605,16 +1605,7 @@ static void get_pixel_clock_parameters(
 
        pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
 
-       /* Links supporting dynamically assigned link encoder will be assigned next
-        * available encoder if one not already assigned.
-        */
-       if (link->is_dig_mapping_flexible &&
-                       link->dc->res_pool->funcs->link_encs_assign) {
-               link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream);
-               if (link_enc == NULL)
-                       link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc);
-       } else
-               link_enc = stream->link->link_enc;
+       link_enc = link_enc_cfg_get_link_enc(link);
        ASSERT(link_enc);
 
        if (link_enc)
@@ -3093,8 +3084,14 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
        else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
                struct dc_link *link = context->streams[0]->sink->link;
 
-               if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+               /* zstate only supported on PWRSEQ0 */
+               if (link->link_index != 0)
+                       return DCN_ZSTATE_SUPPORT_DISALLOW;
+
+               if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
                        return DCN_ZSTATE_SUPPORT_ALLOW;
+               else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
+                       return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
                else
                        return DCN_ZSTATE_SUPPORT_DISALLOW;
        } else
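
The rewritten branch makes the ordering explicit: PWRSEQ0 is a hard requirement, a long stutter period allows any Z-state, and a PSR1 panel (with PSR not force-disabled) is limited to Z10. Restated as a standalone helper for clarity; the helper itself is illustrative, but enum and field names are taken from the hunk:

	/* Sketch: the single-eDP decision above as one function. */
	static enum dcn_zstate_support_state zstate_for_single_edp(
			const struct dc *dc, const struct dc_link *link,
			double stutter_period_us)
	{
		if (link->link_index != 0)	/* zstate only on PWRSEQ0 */
			return DCN_ZSTATE_SUPPORT_DISALLOW;
		if (stutter_period_us > 5000.0)
			return DCN_ZSTATE_SUPPORT_ALLOW;
		if (link->psr_settings.psr_version == DC_PSR_VERSION_1 &&
		    !dc->debug.disable_psr)
			return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
		return DCN_ZSTATE_SUPPORT_DISALLOW;
	}
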
index 8a70f92795c2aa80fbc9f33b13f1c561f1ff2575..aab25ca8343abe4275fc7aa379cf47210a08592f 100644 (file)
@@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
                enc1_stream_encoder_send_immediate_sdp_message,
        .stop_dp_info_packets =
                enc1_stream_encoder_stop_dp_info_packets,
-       .reset_fifo =
-               enc1_stream_encoder_reset_fifo,
        .dp_blank =
                enc1_stream_encoder_dp_blank,
        .dp_unblank =
index 8daa12730bc13e36a9b416cf63686d0c302e7952..a04ca4a9839257c6b860e11ca25d5e9ce526c041 100644 (file)
@@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
                enc3_stream_encoder_update_dp_info_packets,
        .stop_dp_info_packets =
                enc1_stream_encoder_stop_dp_info_packets,
-       .reset_fifo =
-               enc1_stream_encoder_reset_fifo,
        .dp_blank =
                enc1_stream_encoder_dp_blank,
        .dp_unblank =
index 1db1ca19411d8d4b6a1115a439d75b155fc40b06..ed0a0e5fd80539e46c184c6d6de036ba2e9ae349 100644 (file)
@@ -545,8 +545,7 @@ void dcn30_init_hw(struct dc *dc)
                        hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
 
        /* we want to turn off all dp displays before doing detection */
-       if (dc->config.power_down_display_on_boot)
-               dc_link_blank_all_dp_displays(dc);
+       dc_link_blank_all_dp_displays(dc);
 
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
@@ -554,7 +553,7 @@ void dcn30_init_hw(struct dc *dc)
         * Otherwise, if taking control is not possible, we need to power
         * everything down.
         */
-       if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+       if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
                hws->funcs.init_pipes(dc, dc->current_state);
                if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
                        dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
@@ -566,7 +565,7 @@ void dcn30_init_hw(struct dc *dc)
         * To avoid this, power down hardware on boot
         * if DIG is turned on and seamless boot not enabled
         */
-       if (dc->config.power_down_display_on_boot) {
+       if (!dc->config.seamless_boot_edp_requested) {
                struct dc_link *edp_links[MAX_NUM_EDP];
                struct dc_link *edp_link = NULL;
 
index 8ca26383b5687ca383bc2c9c3fb6d72f72c90a26..f10f7a0ca02a02b68934f4286b6a1a25bd787ce0 100644 (file)
@@ -72,8 +72,8 @@
 
 #include "nbio/nbio_7_4_offset.h"
 
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 
 #include "mmhub/mmhub_2_0_0_offset.h"
 #include "mmhub/mmhub_2_0_0_sh_mask.h"
index 5d9637b0742923a47f30c0c181f516f29d021df1..4daf8931aa7ca4e6e4f16b859b95c0b080559436 100644 (file)
@@ -73,8 +73,8 @@
 
 #include "nbio/nbio_7_2_0_offset.h"
 
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 
 #include "reg_helper.h"
 #include "dce/dmub_abm.h"
index 101620a8867aa9796742e32cb1f50d61812478b7..f9561d7f97a1da943f785c35e283f6fec8092892 100644 (file)
@@ -1,11 +1,6 @@
 #
 # (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
 #
-#  All rights reserved.  This notice is intended as a precaution against
-#  inadvertent publication and does not imply publication or any waiver
-#  of confidentiality.  The year included in the foregoing notice is the
-#  year of creation of the work.
-#
 #  Authors: AMD
 #
 # Makefile for dcn302.
@@ -20,13 +15,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
-endif
-
 ifdef CONFIG_X86
 ifdef IS_OLD_GCC
 # Stack alignment mismatch, proceed with caution.
index 2e9cbfa7663b8daa2524558b16fa114b8ad33aa0..88318e8ffca8de8fd794576a09424b47c2798814 100644 (file)
 #include "resource.h"
 #include "vm_helper.h"
 
+#include "dml/dcn302/dcn302_fpu.h"
+
 #include "dimgrey_cavefish_ip_offset.h"
 #include "dcn/dcn_3_0_2_offset.h"
 #include "dcn/dcn_3_0_2_sh_mask.h"
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 #include "nbio/nbio_7_4_offset.h"
 #include "amdgpu_socbb.h"
 
 #define DC_LOGGER_INIT(logger)
 
-struct _vcs_dpi_ip_params_st dcn3_02_ip = {
-               .use_min_dcfclk = 0,
-               .clamp_min_dcfclk = 0,
-               .odm_capable = 1,
-               .gpuvm_enable = 1,
-               .hostvm_enable = 0,
-               .gpuvm_max_page_table_levels = 4,
-               .hostvm_max_page_table_levels = 4,
-               .hostvm_cached_page_table_levels = 0,
-               .pte_group_size_bytes = 2048,
-               .num_dsc = 5,
-               .rob_buffer_size_kbytes = 184,
-               .det_buffer_size_kbytes = 184,
-               .dpte_buffer_size_in_pte_reqs_luma = 64,
-               .dpte_buffer_size_in_pte_reqs_chroma = 34,
-               .pde_proc_buffer_size_64k_reqs = 48,
-               .dpp_output_buffer_pixels = 2560,
-               .opp_output_buffer_lines = 1,
-               .pixel_chunk_size_kbytes = 8,
-               .pte_enable = 1,
-               .max_page_table_levels = 2,
-               .pte_chunk_size_kbytes = 2,  // ?
-               .meta_chunk_size_kbytes = 2,
-               .writeback_chunk_size_kbytes = 8,
-               .line_buffer_size_bits = 789504,
-               .is_line_buffer_bpp_fixed = 0,  // ?
-               .line_buffer_fixed_bpp = 0,     // ?
-               .dcc_supported = true,
-               .writeback_interface_buffer_size_kbytes = 90,
-               .writeback_line_buffer_buffer_size = 0,
-               .max_line_buffer_lines = 12,
-               .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
-               .writeback_chroma_buffer_size_kbytes = 8,
-               .writeback_chroma_line_buffer_width_pixels = 4,
-               .writeback_max_hscl_ratio = 1,
-               .writeback_max_vscl_ratio = 1,
-               .writeback_min_hscl_ratio = 1,
-               .writeback_min_vscl_ratio = 1,
-               .writeback_max_hscl_taps = 1,
-               .writeback_max_vscl_taps = 1,
-               .writeback_line_buffer_luma_buffer_size = 0,
-               .writeback_line_buffer_chroma_buffer_size = 14643,
-               .cursor_buffer_size = 8,
-               .cursor_chunk_size = 2,
-               .max_num_otg = 5,
-               .max_num_dpp = 5,
-               .max_num_wb = 1,
-               .max_dchub_pscl_bw_pix_per_clk = 4,
-               .max_pscl_lb_bw_pix_per_clk = 2,
-               .max_lb_vscl_bw_pix_per_clk = 4,
-               .max_vscl_hscl_bw_pix_per_clk = 4,
-               .max_hscl_ratio = 6,
-               .max_vscl_ratio = 6,
-               .hscl_mults = 4,
-               .vscl_mults = 4,
-               .max_hscl_taps = 8,
-               .max_vscl_taps = 8,
-               .dispclk_ramp_margin_percent = 1,
-               .underscan_factor = 1.11,
-               .min_vblank_lines = 32,
-               .dppclk_delay_subtotal = 46,
-               .dynamic_metadata_vm_enabled = true,
-               .dppclk_delay_scl_lb_only = 16,
-               .dppclk_delay_scl = 50,
-               .dppclk_delay_cnvc_formatter = 27,
-               .dppclk_delay_cnvc_cursor = 6,
-               .dispclk_delay_subtotal = 119,
-               .dcfclk_cstate_latency = 5.2, // SRExitTime
-               .max_inter_dcn_tile_repeaters = 8,
-               .max_num_hdmi_frl_outputs = 1,
-               .odm_combine_4to1_supported = true,
-
-               .xfc_supported = false,
-               .xfc_fill_bw_overhead_percent = 10.0,
-               .xfc_fill_constant_bytes = 0,
-               .gfx7_compat_tiling_supported = 0,
-               .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
-               .clock_limits = {
-                               {
-                                               .state = 0,
-                                               .dispclk_mhz = 562.0,
-                                               .dppclk_mhz = 300.0,
-                                               .phyclk_mhz = 300.0,
-                                               .phyclk_d18_mhz = 667.0,
-                                               .dscclk_mhz = 405.6,
-                               },
-               },
-
-               .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
-               .num_states = 1,
-               .sr_exit_time_us = 26.5,
-               .sr_enter_plus_exit_time_us = 31,
-               .urgent_latency_us = 4.0,
-               .urgent_latency_pixel_data_only_us = 4.0,
-               .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
-               .urgent_latency_vm_data_only_us = 4.0,
-               .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
-               .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
-               .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-               .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
-               .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
-               .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
-               .max_avg_sdp_bw_use_normal_percent = 60.0,
-               .max_avg_dram_bw_use_normal_percent = 40.0,
-               .writeback_latency_us = 12.0,
-               .max_request_size_bytes = 256,
-               .fabric_datapath_to_dcn_data_return_bytes = 64,
-               .dcn_downspread_percent = 0.5,
-               .downspread_percent = 0.38,
-               .dram_page_open_time_ns = 50.0,
-               .dram_rw_turnaround_time_ns = 17.5,
-               .dram_return_buffer_per_channel_bytes = 8192,
-               .round_trip_ping_latency_dcfclk_cycles = 156,
-               .urgent_out_of_order_return_per_channel_bytes = 4096,
-               .channel_interleave_bytes = 256,
-               .num_banks = 8,
-               .gpuvm_min_page_size_bytes = 4096,
-               .hostvm_min_page_size_bytes = 4096,
-               .dram_clock_change_latency_us = 404,
-               .dummy_pstate_latency_us = 5,
-               .writeback_dram_clock_change_latency_us = 23.0,
-               .return_bus_width_bytes = 64,
-               .dispclk_dppclk_vco_speed_mhz = 3650,
-               .xfc_bus_transport_time_us = 20,      // ?
-               .xfc_xbuf_latency_tolerance_us = 4,  // ?
-               .use_urgent_burst_bw = 1,            // ?
-               .do_urgent_latency_adjustment = true,
-               .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
-               .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
-};
-
 static const struct dc_debug_options debug_defaults_drv = {
                .disable_dmcu = true,
                .force_abm_enable = false,
@@ -1105,24 +973,19 @@ static bool init_soc_bounding_box(struct dc *dc,  struct resource_pool *pool)
        loaded_ip->max_num_otg = pool->pipe_count;
        loaded_ip->max_num_dpp = pool->pipe_count;
        loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;
+       DC_FP_START();
        dcn20_patch_bounding_box(dc, loaded_bb);
+       DC_FP_END();
 
        if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
                struct bp_soc_bb_info bb_info = { 0 };
 
                if (dc->ctx->dc_bios->funcs->get_soc_bb_info(
                            dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
-                       if (bb_info.dram_clock_change_latency_100ns > 0)
-                               dcn3_02_soc.dram_clock_change_latency_us =
-                                       bb_info.dram_clock_change_latency_100ns * 10;
 
-                       if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
-                               dcn3_02_soc.sr_enter_plus_exit_time_us =
-                                       bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
-                       if (bb_info.dram_sr_exit_latency_100ns > 0)
-                               dcn3_02_soc.sr_exit_time_us =
-                                       bb_info.dram_sr_exit_latency_100ns * 10;
+                               DC_FP_START();
+                               dcn302_fpu_init_soc_bounding_box(bb_info);
+                               DC_FP_END();
                }
        }
 
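
The VBIOS latency patching moves behind DC_FP_START()/DC_FP_END(), which on x86 wrap kernel_fpu_begin()/kernel_fpu_end() so FPU state is saved and restored around the double-precision math. A sketch of the helper's body, reconstructed from the lines deleted above; the dc_assert_fp_enabled() guard is an assumption about the dml convention:

	/* dml/dcn302/dcn302_fpu.c (sketch): the latency fields arrive from
	 * VBIOS in units of 100 ns, hence the *10 conversion to microseconds. */
	void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
	{
		dc_assert_fp_enabled();

		if (bb_info.dram_clock_change_latency_100ns > 0)
			dcn3_02_soc.dram_clock_change_latency_us =
					bb_info.dram_clock_change_latency_100ns * 10;

		if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
			dcn3_02_soc.sr_enter_plus_exit_time_us =
					bb_info.dram_sr_enter_exit_latency_100ns * 10;

		if (bb_info.dram_sr_exit_latency_100ns > 0)
			dcn3_02_soc.sr_exit_time_us =
					bb_info.dram_sr_exit_latency_100ns * 10;
	}
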
@@ -1257,170 +1120,11 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool)
        *pool = NULL;
 }
 
-static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
-               unsigned int *optimal_dcfclk,
-               unsigned int *optimal_fclk)
-{
-       double bw_from_dram, bw_from_dram1, bw_from_dram2;
-
-       bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
-               dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
-       bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
-               dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);
-
-       bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
-
-       if (optimal_fclk)
-               *optimal_fclk = bw_from_dram /
-               (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
-
-       if (optimal_dcfclk)
-               *optimal_dcfclk =  bw_from_dram /
-               (dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
-}
-
 void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 {
-       unsigned int i, j;
-       unsigned int num_states = 0;
-
-       unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
-       unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
-       unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
-       unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
-
-       unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
-       unsigned int num_dcfclk_sta_targets = 4;
-       unsigned int num_uclk_states;
-
-
-       if (dc->ctx->dc_bios->vram_info.num_chans)
-               dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
-
-       if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
-               dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
-
-       dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-       dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
-       if (bw_params->clk_table.entries[0].memclk_mhz) {
-               int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
-
-               for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
-                       if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
-                               max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
-                       if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
-                               max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
-                       if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
-                               max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
-                       if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
-                               max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
-               }
-               if (!max_dcfclk_mhz)
-                       max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
-               if (!max_dispclk_mhz)
-                       max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
-               if (!max_dppclk_mhz)
-                       max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
-               if (!max_phyclk_mhz)
-                       max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;
-
-               if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
-                       /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
-                       dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
-                       num_dcfclk_sta_targets++;
-               } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
-                       /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
-                       for (i = 0; i < num_dcfclk_sta_targets; i++) {
-                               if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
-                                       dcfclk_sta_targets[i] = max_dcfclk_mhz;
-                                       break;
-                               }
-                       }
-                       /* Update size of array since we "removed" duplicates */
-                       num_dcfclk_sta_targets = i + 1;
-               }
-
-               num_uclk_states = bw_params->clk_table.num_entries;
-
-               /* Calculate optimal dcfclk for each uclk */
-               for (i = 0; i < num_uclk_states; i++) {
-                       dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
-                                       &optimal_dcfclk_for_uclk[i], NULL);
-                       if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) {
-                               optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
-                       }
-               }
-
-               /* Calculate optimal uclk for each dcfclk sta target */
-               for (i = 0; i < num_dcfclk_sta_targets; i++) {
-                       for (j = 0; j < num_uclk_states; j++) {
-                               if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
-                                       optimal_uclk_for_dcfclk_sta_targets[i] =
-                                                       bw_params->clk_table.entries[j].memclk_mhz * 16;
-                                       break;
-                               }
-                       }
-               }
-
-               i = 0;
-               j = 0;
-               /* create the final dcfclk and uclk table */
-               while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
-                       if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
-                               dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
-                               dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
-                       } else {
-                               if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
-                                       dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
-                                       dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
-                               } else {
-                                       j = num_uclk_states;
-                               }
-                       }
-               }
-
-               while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
-                       dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
-                       dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
-               }
-
-               while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
-                               optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
-                       dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
-                       dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
-               }
-
-               dcn3_02_soc.num_states = num_states;
-               for (i = 0; i < dcn3_02_soc.num_states; i++) {
-                       dcn3_02_soc.clock_limits[i].state = i;
-                       dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
-                       dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
-                       dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
-
-                       /* Fill all states with max values of all other clocks */
-                       dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
-                       dcn3_02_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
-                       dcn3_02_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
-                       /* Populate from bw_params for DTBCLK, SOCCLK */
-                       if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
-                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
-                       else
-                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = bw_params->clk_table.entries[i].dtbclk_mhz;
-                       if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
-                               dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
-                       else
-                               dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
-                       /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */
-                       /* FCLK, PHYCLK_D18, DSCCLK */
-                       dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
-                       dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
-               }
-               /* re-init DML with updated bb */
-               dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
-               if (dc->current_state)
-                       dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
-       }
+       DC_FP_START();
+       dcn302_fpu_update_bw_bounding_box(dc, bw_params);
+       DC_FP_END();
 }
 
 static struct resource_funcs dcn302_res_pool_funcs = {
index 42d2c73e30bc163801ed58e27dc1c7ccf1cfbc41..9f24e73b92b314a05c367b4f1850cc69d5e7595f 100644 (file)
@@ -28,6 +28,9 @@
 
 #include "core_types.h"
 
+extern struct _vcs_dpi_ip_params_st dcn3_02_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc;
+
 struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc);
 
 void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
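
Exporting the two tables keeps the resource file buildable while their definitions, together with all the double-typed math deleted above, move into the dml tree, which is compiled with the hard-float flags. A sketch of the assumed layout on the dml side; values shown are carried over from the removed initializers:

	/* dml/dcn302/dcn302_fpu.c (assumed layout): the tables declared
	 * extern above are defined here, next to the FP-only helpers, so
	 * double arithmetic never appears in normally-compiled objects. */
	struct _vcs_dpi_ip_params_st dcn3_02_ip = {
		.odm_capable = 1,
		.gpuvm_enable = 1,
		/* ... remaining values from the removed initializer ... */
	};

	struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
		.num_states = 1,
		.sr_exit_time_us = 26.5,
		/* ... remaining values from the removed initializer ... */
	};
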
index 2de687f64cf6a874a36bbc24dd12484aca36e526..36649716e9919db7ac57d78b40c9eb3fbdab0f74 100644 (file)
@@ -48,8 +48,8 @@
 #include "sienna_cichlid_ip_offset.h"
 #include "dcn/dcn_3_0_3_offset.h"
 #include "dcn/dcn_3_0_3_sh_mask.h"
-#include "dcn/dpcs_3_0_3_offset.h"
-#include "dcn/dpcs_3_0_3_sh_mask.h"
+#include "dpcs/dpcs_3_0_3_offset.h"
+#include "dpcs/dpcs_3_0_3_sh_mask.h"
 #include "nbio/nbio_2_3_offset.h"
 
 #define DC_LOGGER_INIT(logger)
index ea4f8e06b07ccf10caacdeb73851424cd6a9bf36..287a1066b547280b84ee350f5c4a58aa106213fc 100644 (file)
@@ -121,7 +121,8 @@ static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst)
                return;
        }
        if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
-               REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+               REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                       DPSTREAMCLK_GATE_DISABLE, 1,
                        DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
 }
 
@@ -130,8 +131,9 @@ static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst)
        struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
 
        if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
-               REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
-                               DPSTREAMCLK_ROOT_GATE_DISABLE, 0);
+               REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                               DPSTREAMCLK_ROOT_GATE_DISABLE, 0,
+                               DPSTREAMCLK_GATE_DISABLE, 0);
 
        switch (otg_inst) {
        case 0:
@@ -180,7 +182,8 @@ void dccg31_enable_symclk32_se(
        switch (hpo_se_inst) {
        case 0:
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE0_GATE_DISABLE, 1,
                                        SYMCLK32_ROOT_SE0_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_SE_CNTL,
                                SYMCLK32_SE0_SRC_SEL, phyd32clk,
@@ -188,7 +191,8 @@ void dccg31_enable_symclk32_se(
                break;
        case 1:
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE1_GATE_DISABLE, 1,
                                        SYMCLK32_ROOT_SE1_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_SE_CNTL,
                                SYMCLK32_SE1_SRC_SEL, phyd32clk,
@@ -196,7 +200,8 @@ void dccg31_enable_symclk32_se(
                break;
        case 2:
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE2_GATE_DISABLE, 1,
                                        SYMCLK32_ROOT_SE2_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_SE_CNTL,
                                SYMCLK32_SE2_SRC_SEL, phyd32clk,
@@ -204,7 +209,8 @@ void dccg31_enable_symclk32_se(
                break;
        case 3:
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE3_GATE_DISABLE, 1,
                                        SYMCLK32_ROOT_SE3_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_SE_CNTL,
                                SYMCLK32_SE3_SRC_SEL, phyd32clk,
@@ -229,7 +235,8 @@ void dccg31_disable_symclk32_se(
                                SYMCLK32_SE0_SRC_SEL, 0,
                                SYMCLK32_SE0_EN, 0);
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE0_GATE_DISABLE, 0,
                                        SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
                break;
        case 1:
@@ -237,7 +244,8 @@ void dccg31_disable_symclk32_se(
                                SYMCLK32_SE1_SRC_SEL, 0,
                                SYMCLK32_SE1_EN, 0);
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE1_GATE_DISABLE, 0,
                                        SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
                break;
        case 2:
@@ -245,7 +253,8 @@ void dccg31_disable_symclk32_se(
                                SYMCLK32_SE2_SRC_SEL, 0,
                                SYMCLK32_SE2_EN, 0);
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE2_GATE_DISABLE, 0,
                                        SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
                break;
        case 3:
@@ -253,7 +262,8 @@ void dccg31_disable_symclk32_se(
                                SYMCLK32_SE3_SRC_SEL, 0,
                                SYMCLK32_SE3_EN, 0);
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_SE3_GATE_DISABLE, 0,
                                        SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
                break;
        default:
@@ -275,7 +285,8 @@ void dccg31_enable_symclk32_le(
        switch (hpo_le_inst) {
        case 0:
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_LE0_GATE_DISABLE, 1,
                                        SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_LE_CNTL,
                                SYMCLK32_LE0_SRC_SEL, phyd32clk,
@@ -283,7 +294,8 @@ void dccg31_enable_symclk32_le(
                break;
        case 1:
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_LE1_GATE_DISABLE, 1,
                                        SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
                REG_UPDATE_2(SYMCLK32_LE_CNTL,
                                SYMCLK32_LE1_SRC_SEL, phyd32clk,
@@ -308,7 +320,8 @@ void dccg31_disable_symclk32_le(
                                SYMCLK32_LE0_SRC_SEL, 0,
                                SYMCLK32_LE0_EN, 0);
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_LE0_GATE_DISABLE, 0,
                                        SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
                break;
        case 1:
@@ -316,7 +329,8 @@ void dccg31_disable_symclk32_le(
                                SYMCLK32_LE1_SRC_SEL, 0,
                                SYMCLK32_LE1_EN, 0);
                if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
-                       REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+                       REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3,
+                                       SYMCLK32_LE1_GATE_DISABLE, 0,
                                        SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
                break;
        default:
@@ -406,54 +420,89 @@ void dccg31_set_physymclk(
        /* Force PHYSYMCLK on and Select phyd32clk as the source of clock which is output to PHY through DCIO */
        switch (phy_inst) {
        case 0:
-               if (force_enable)
+               if (force_enable) {
                        REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
                                        PHYASYMCLK_FORCE_EN, 1,
                                        PHYASYMCLK_FORCE_SRC_SEL, clk_src);
-               else
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYASYMCLK_GATE_DISABLE, 1);
+               } else {
                        REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
                                        PHYASYMCLK_FORCE_EN, 0,
                                        PHYASYMCLK_FORCE_SRC_SEL, 0);
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYASYMCLK_GATE_DISABLE, 0);
+               }
                break;
        case 1:
-               if (force_enable)
+               if (force_enable) {
                        REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
                                        PHYBSYMCLK_FORCE_EN, 1,
                                        PHYBSYMCLK_FORCE_SRC_SEL, clk_src);
-               else
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYBSYMCLK_GATE_DISABLE, 1);
+               } else {
                        REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
                                        PHYBSYMCLK_FORCE_EN, 0,
                                        PHYBSYMCLK_FORCE_SRC_SEL, 0);
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYBSYMCLK_GATE_DISABLE, 0);
+               }
                break;
        case 2:
-               if (force_enable)
+               if (force_enable) {
                        REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
                                        PHYCSYMCLK_FORCE_EN, 1,
                                        PHYCSYMCLK_FORCE_SRC_SEL, clk_src);
-               else
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYCSYMCLK_GATE_DISABLE, 1);
+               } else {
                        REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
                                        PHYCSYMCLK_FORCE_EN, 0,
                                        PHYCSYMCLK_FORCE_SRC_SEL, 0);
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYCSYMCLK_GATE_DISABLE, 0);
+               }
                break;
        case 3:
-               if (force_enable)
+               if (force_enable) {
                        REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
                                        PHYDSYMCLK_FORCE_EN, 1,
                                        PHYDSYMCLK_FORCE_SRC_SEL, clk_src);
-               else
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYDSYMCLK_GATE_DISABLE, 1);
+               } else {
                        REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
                                        PHYDSYMCLK_FORCE_EN, 0,
                                        PHYDSYMCLK_FORCE_SRC_SEL, 0);
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYDSYMCLK_GATE_DISABLE, 0);
+               }
                break;
        case 4:
-               if (force_enable)
+               if (force_enable) {
                        REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
                                        PHYESYMCLK_FORCE_EN, 1,
                                        PHYESYMCLK_FORCE_SRC_SEL, clk_src);
-               else
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYESYMCLK_GATE_DISABLE, 1);
+               } else {
                        REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
                                        PHYESYMCLK_FORCE_EN, 0,
                                        PHYESYMCLK_FORCE_SRC_SEL, 0);
+                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                                       PHYESYMCLK_GATE_DISABLE, 0);
+               }
                break;
        default:
                BREAK_TO_DEBUGGER();
@@ -615,6 +664,13 @@ void dccg31_init(struct dccg *dccg)
                dccg31_disable_dpstreamclk(dccg, 3);
        }
 
+       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) {
+               dccg31_set_physymclk(dccg, 0, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+               dccg31_set_physymclk(dccg, 1, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+               dccg31_set_physymclk(dccg, 2, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+               dccg31_set_physymclk(dccg, 3, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+               dccg31_set_physymclk(dccg, 4, PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+       }
 }
 
 static const struct dccg_funcs dccg31_funcs = {
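
Reading note for the two hunks above: these DCCG bits are "gate disable" controls, so writing 1 disables clock gating (the clock is kept running) and writing 0 allows the clock to gate when idle. dccg31_set_physymclk() now pairs each force-enable with the matching leaf PHY*SYMCLK_GATE_DISABLE bit in DCCG_GATE_DISABLE_CNTL2, and dccg31_init() parks all five PHY symbol clocks un-forced so they can gate until a link needs them. The five new init calls are equivalent to this loop (illustrative form; the driver spells them out):

        if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) {
                int phy;

                /* PHY A through PHY E */
                for (phy = 0; phy <= 4; phy++)
                        dccg31_set_physymclk(dccg, phy,
                                             PHYSYMCLK_FORCE_SRC_SYMCLK, false);
        }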
index a013a32bbaf7b775eb8a2c26aa9a328ad8a9101d..269cabbea72ab98a3ee540da042315150eb9899e 100644 (file)
@@ -66,6 +66,7 @@
        SR(DSCCLK1_DTO_PARAM),\
        SR(DSCCLK2_DTO_PARAM),\
        SR(DSCCLK_DTO_CTRL),\
+       SR(DCCG_GATE_DISABLE_CNTL2),\
        SR(DCCG_GATE_DISABLE_CNTL3),\
        SR(HDMISTREAMCLK0_DTO_PARAM)
 
        DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
        DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
        DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+       DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\
+       DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\
+       DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\
+       DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\
+       DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\
        DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\
        DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\
        DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
index 5065904c78335743e92551f12d636772d5714723..23621ff08c9053c781e93ddcb9904522f5301b07 100644 (file)
@@ -710,6 +710,16 @@ static void dcn31_hpo_dp_stream_enc_read_state(
        }
 }
 
+static void dcn31_set_hblank_min_symbol_width(
+               struct hpo_dp_stream_encoder *enc,
+               uint16_t width)
+{
+       struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+       REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0,
+                       HBLANK_MINIMUM_SYMBOL_WIDTH, width);
+}
+
 static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {
        .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream,
        .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank,
@@ -725,6 +735,7 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {
        .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable,
        .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable,
        .read_state = dcn31_hpo_dp_stream_enc_read_state,
+       .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width,
 };
 
 void dcn31_hpo_dp_stream_encoder_construct(
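The new dcn31_set_hblank_min_symbol_width() callback programs HBLANK_MINIMUM_SYMBOL_WIDTH in DP_SYM32_ENC_HBLANK_CONTROL on the DCN3.1 HPO (128b/132b, DP2) stream encoder; registering it in dcn30_str_enc_funcs lets link code set a floor on the hblank period in 32-bit symbols, plausibly related to the DP tunneling fixes in this series. A hypothetical caller through the new vtable hook (the helper name is invented for the sketch):

        /* Hypothetical: clamp the encoder's minimum hblank width, in symbols. */
        static void apply_hblank_min(struct hpo_dp_stream_encoder *enc, uint16_t width)
        {
                if (enc->funcs->set_hblank_min_symbol_width)
                        enc->funcs->set_hblank_min_symbol_width(enc, width);
        }
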
index 70b94fc25304b9128a4e39d81f5abc2a4d338561..7c77c71591a08219341a3134b94f0f7606ceed94 100644 (file)
@@ -80,7 +80,8 @@
        SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\
        SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\
        SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\
-       SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id)
+       SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \
+       SRI(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id)
 
 #define DCN3_1_HPO_DP_STREAM_ENC_REGS \
        uint32_t DP_STREAM_MAPPER_CONTROL0;\
        uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\
        uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\
        uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\
-       uint32_t DP_SYM32_ENC_VID_CRC_CONTROL
+       uint32_t DP_SYM32_ENC_VID_CRC_CONTROL;\
+       uint32_t DP_SYM32_ENC_HBLANK_CONTROL
 
 
 #define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\
        type GSP_SOF_REFERENCE;\
        type METADATA_PACKET_ENABLE;\
        type CRC_ENABLE;\
-       type CRC_CONT_MODE_ENABLE
+       type CRC_CONT_MODE_ENABLE;\
+       type HBLANK_MINIMUM_SYMBOL_WIDTH
 
 
 struct dcn31_hpo_dp_stream_encoder_registers {
index 90c73a1cb98614d42dc9177ed4e5100fa0a2bfc2..5e3bcaf12cac4b55460981dd2e4b79586187b2e4 100644 (file)
@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
        ret_val = wm_ns * refclk_mhz;
        ret_val /= 1000;
 
-       if (ret_val > clamp_value)
+       if (ret_val > clamp_value) {
+               /* clamping WMs is abnormal, unexpected and may lead to underflow */
+               ASSERT(0);
                ret_val = clamp_value;
+       }
 
        return ret_val;
 }
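
convert_and_clamp() turns a watermark in nanoseconds into reference-clock cycles (cycles = ns * refclk_mhz / 1000) and clamps the result to the destination register field's maximum. The new ASSERT(0) makes hitting the clamp loud in debug builds, since a silently clamped watermark under-programs the hardware and can lead to underflow. A standalone restatement with a worked example:

        #include <assert.h>
        #include <stdint.h>

        /* Illustrative re-statement of convert_and_clamp(). */
        static uint32_t ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
                                            uint32_t field_max)
        {
                uint32_t cycles = (uint32_t)((uint64_t)wm_ns * refclk_mhz / 1000);

                assert(cycles <= field_max);    /* clamping is abnormal */
                return cycles > field_max ? field_max : cycles;
        }

        /* Example: an 11720 ns watermark at refclk_mhz = 600 gives
         * 11720 * 600 / 1000 = 7032 cycles, comfortably inside a
         * 14-bit (0x3fff) field. */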
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
                hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
 
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
                hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
        } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
                hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
 
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
                hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
        } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
                hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
 
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
                hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
        } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
                hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
                prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
                                DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
 
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
        if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
                hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
                prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0x3fff);
                REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
                                DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
        } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->a.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->b.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->c.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_exit_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_exit_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
                                watermarks->d.cstate_pstate.cstate_exit_z8_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.cstate_exit_z8_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
                                DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->a.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->a.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->b.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->b.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->c.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->c.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
                                watermarks->d.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.pstate_change_ns,
-                               refclk_mhz, 0x1fffff);
+                               refclk_mhz, 0xffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
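
The clamp constants in this file shrink from 0x1fffff (21 bits) to 0x3fff (14 bits) for the urgency and trip-to-memory watermarks, and to 0xffff (16 bits) for the SR and DRAM-clock-change watermarks. The likely reading (an inference, not stated in the diff) is that these now match the true field widths of the DCN3.1 DCHUBBUB_ARB registers, so an oversized value is clamped, and now asserts, instead of having its high bits silently dropped by the register write. The arithmetic behind the constants:

        /* Field-width arithmetic behind the new clamp values: */
        #define FIELD_MAX(bits) ((1u << (bits)) - 1u)

        _Static_assert(FIELD_MAX(14) == 0x3fff, "urgency watermark fields");
        _Static_assert(FIELD_MAX(16) == 0xffff, "SR / p-state watermark fields");
        _Static_assert(FIELD_MAX(21) == 0x1fffff, "the old, wider clamp");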
index 4206ce5bf9a92dc68d42c054b48eb9575f8f893a..b2cfd277b9139a6e0b1bd5fbfec8eafaa5007b6b 100644 (file)
@@ -197,8 +197,7 @@ void dcn31_init_hw(struct dc *dc)
                dmub_enable_outbox_notification(dc);
 
        /* we want to turn off all dp displays before doing detection */
-       if (dc->config.power_down_display_on_boot)
-               dc_link_blank_all_dp_displays(dc);
+       dc_link_blank_all_dp_displays(dc);
 
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
@@ -206,7 +205,7 @@ void dcn31_init_hw(struct dc *dc)
         * Otherwise, if taking control is not possible, we need to power
         * everything down.
         */
-       if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+       if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
                hws->funcs.init_pipes(dc, dc->current_state);
                if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
                        dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
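
Behavior change above: DP displays are now blanked before detection unconditionally, and the pipe power-down decision keys off the new, inverted flag dc->config.seamless_boot_edp_requested instead of power_down_display_on_boot. Condensed, the post-change decision is (sketch only):

        /* Pipes are initialized (powered down) unless a seamless eDP boot was
         * requested and VBIOS is not already in accelerated mode. */
        static bool should_init_pipes(struct dc *dc, struct dc_bios *dcb)
        {
                return dcb->funcs->is_accelerated_mode(dcb) ||
                       !dc->config.seamless_boot_edp_requested;
        }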
index e8562fa11366a481133ba9073bf3cee2954ef547..8afe2130d7c54ff911acf8df48e0ab5714f42ecb 100644 (file)
@@ -161,7 +161,7 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
        return true;
 }
 
-static void optc31_set_drr(
+void optc31_set_drr(
        struct timing_generator *optc,
        const struct drr_params *params)
 {
index d8ef2f0d0c9582034f446637ecb97108481b1ee8..a37b16040c1d6bae78b271bcc20b4d3367449341 100644 (file)
 
 void dcn31_timing_generator_init(struct optc *optc1);
 
+void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
+
 #endif /* __DC_OPTC_DCN31_H__ */
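
optc31_set_drr() loses its static qualifier and gains a declaration here, exporting the DCN3.1 DRR (dynamic refresh rate) programming, presumably so other OPTC variants can reference it from their own timing_generator_funcs tables instead of duplicating it. A hypothetical reuse:

        /* Hypothetical derived OPTC reusing the exported helper: */
        static const struct timing_generator_funcs my_optc_funcs = {
                /* ... */
                .set_drr = optc31_set_drr,
        };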
index 8d64187478e42dcf51aa549258e28e507d6c9d5d..1166748b85a7dd6157b7a314ae318ef3daf4493d 100644 (file)
@@ -1033,6 +1033,7 @@ static const struct dc_debug_options debug_defaults_drv = {
        .optimize_edp_link_rate = true,
        .enable_sw_cntl_psr = true,
        .apply_vendor_specific_lttpr_wa = true,
+       .enable_z9_disable_interface = false
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -1841,7 +1842,8 @@ static int dcn31_populate_dml_pipes_from_context(
                if (is_dual_plane(pipe->plane_state->format)
                                && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
                        dc->config.enable_4to1MPC = true;
-               } else if (!is_dual_plane(pipe->plane_state->format)) {
+               } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) {
+                       /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */
                        context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
                        pipes[0].pipe.src.unbounded_req_mode = true;
                }
index 7f94e3f70d7f517bf5daae80c29aa54ff63b7a07..b71b5fb894e2d032eaf630bbeb702b2c9f7e5df1 100644 (file)
@@ -113,7 +113,7 @@ bool dm_helpers_dp_mst_start_top_mgr(
                const struct dc_link *link,
                bool boot);
 
-void dm_helpers_dp_mst_stop_top_mgr(
+bool dm_helpers_dp_mst_stop_top_mgr(
                struct dc_context *ctx,
                struct dc_link *link);
 /**
@@ -170,9 +170,9 @@ bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enabl
 
 void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us);
 
-// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd
+// 0x1 = Result_OK, 0xFE = Result_UnknownCmd, 0x0 = Status_Busy
 #define IS_SMU_TIMEOUT(result) \
-       (!(result == 0x1 || result == 0xFE))
+       (result == 0x0)
 
 int dm_helper_dmub_aux_transfer_sync(
                struct dc_context *ctx,
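
IS_SMU_TIMEOUT() previously flagged a timeout for anything other than 0x1 (Result_OK) and 0xFE (Result_UnknownCmd); other non-zero response codes are also terminal replies, so the macro now reports a timeout only for 0x0, meaning the SMU never posted a response (Status_Busy). A sketch of the intended call pattern (the response-register helper name is invented; dm_helpers_smu_timeout() is the real hook declared above):

        static void smu_wait_done(struct dc_context *ctx, unsigned int msg_id,
                                  unsigned int param, unsigned int timeout_us)
        {
                uint32_t result = read_smu_response_reg(ctx);  /* hypothetical poll */

                if (IS_SMU_TIMEOUT(result))
                        /* the SMU never left the busy state: log and escalate */
                        dm_helpers_smu_timeout(ctx, msg_id, param, timeout_us);
        }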
index eee6672bd32dea0e16101fabaf1b2a1b7d8b7a17..b16c492593e23737410139944dd34094da0eaf5b 100644 (file)
@@ -71,8 +71,12 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags)
@@ -93,10 +97,14 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
 
-DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
+DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o
 
 ifdef CONFIG_DRM_AMD_DC_DCN
+DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
 DML += dcn20/dcn20_fpu.o
 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
 DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
@@ -104,7 +112,9 @@ DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
 DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
 DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
 DML += dcn301/dcn301_fpu.o
+DML += dcn302/dcn302_fpu.o
 DML += dsc/rc_calc_fpu.o
+DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
 endif
 
 AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
similarity index 99%
rename from drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
rename to drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c
index e6ef36de082540f1a91985f800db41a16cf9e6b6..0100a6053ab6b2d10ded15b84633ff21d17da8e6 100644 (file)
@@ -3411,35 +3411,33 @@ bool bw_calcs(struct dc_context *ctx,
                        calcs_output->stutter_exit_wm_ns[5].c_mark =
                                bw_fixed_to_int(bw_mul(data->
                                        stutter_exit_watermark[9], bw_int_to_fixed(1000)));
-
-               calcs_output->stutter_entry_wm_ns[0].c_mark =
-                       bw_fixed_to_int(bw_mul(data->
-                               stutter_entry_watermark[4], bw_int_to_fixed(1000)));
-               calcs_output->stutter_entry_wm_ns[1].c_mark =
-                       bw_fixed_to_int(bw_mul(data->
-                               stutter_entry_watermark[5], bw_int_to_fixed(1000)));
-               calcs_output->stutter_entry_wm_ns[2].c_mark =
-                       bw_fixed_to_int(bw_mul(data->
-                               stutter_entry_watermark[6], bw_int_to_fixed(1000)));
-               if (ctx->dc->caps.max_slave_planes) {
-                       calcs_output->stutter_entry_wm_ns[3].c_mark =
+                       calcs_output->stutter_entry_wm_ns[0].c_mark =
                                bw_fixed_to_int(bw_mul(data->
-                                       stutter_entry_watermark[0], bw_int_to_fixed(1000)));
-                       calcs_output->stutter_entry_wm_ns[4].c_mark =
+                                       stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+                       calcs_output->stutter_entry_wm_ns[1].c_mark =
                                bw_fixed_to_int(bw_mul(data->
-                                       stutter_entry_watermark[1], bw_int_to_fixed(1000)));
-               } else {
-                       calcs_output->stutter_entry_wm_ns[3].c_mark =
+                                       stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+                       calcs_output->stutter_entry_wm_ns[2].c_mark =
                                bw_fixed_to_int(bw_mul(data->
-                                       stutter_entry_watermark[7], bw_int_to_fixed(1000)));
-                       calcs_output->stutter_entry_wm_ns[4].c_mark =
+                                       stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+                       if (ctx->dc->caps.max_slave_planes) {
+                               calcs_output->stutter_entry_wm_ns[3].c_mark =
+                                       bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[0],
+                                               bw_int_to_fixed(1000)));
+                               calcs_output->stutter_entry_wm_ns[4].c_mark =
+                                       bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[1],
+                                               bw_int_to_fixed(1000)));
+                       } else {
+                               calcs_output->stutter_entry_wm_ns[3].c_mark =
+                                       bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[7],
+                                               bw_int_to_fixed(1000)));
+                               calcs_output->stutter_entry_wm_ns[4].c_mark =
+                                       bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[8],
+                                               bw_int_to_fixed(1000)));
+                       }
+                       calcs_output->stutter_entry_wm_ns[5].c_mark =
                                bw_fixed_to_int(bw_mul(data->
-                                       stutter_entry_watermark[8], bw_int_to_fixed(1000)));
-               }
-               calcs_output->stutter_entry_wm_ns[5].c_mark =
-                       bw_fixed_to_int(bw_mul(data->
-                               stutter_entry_watermark[9], bw_int_to_fixed(1000)));
-
+                                       stutter_entry_watermark[9], bw_int_to_fixed(1000)));
                        calcs_output->urgent_wm_ns[0].c_mark =
                                bw_fixed_to_int(bw_mul(data->
                                        urgent_watermark[4], bw_int_to_fixed(1000)));
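
The new file that follows moves the DCN3.02 DML tables (dcn3_02_ip, dcn3_02_soc) and the bandwidth bounding-box update out of dcn302_resource.c, matching the Makefile hunk earlier that compiles dcn302/dcn302_fpu.o with dml_ccflags. Its core is a two-list merge: the fixed DCFCLK STA targets and the per-UCLK optimal DCFCLKs are combined in ascending order into the final voltage-state table. A self-contained sketch of that merge (names invented; the tail loops that drain whichever list remains are omitted here but present in the real function):

        /* Merge ascending DCFCLK STA targets with per-UCLK optimal DCFCLKs,
         * emitting (dcfclk, dram_speed) voltage states in ascending order. */
        static unsigned int merge_states(const unsigned int *sta,
                                         const unsigned int *opt_uclk_for_sta,
                                         unsigned int n_sta,
                                         const unsigned int *opt_dcf_for_uclk,
                                         const unsigned int *uclk_mts,
                                         unsigned int n_uclk,
                                         unsigned int max_dcfclk,
                                         unsigned int *out_dcf,
                                         unsigned int *out_dram)
        {
                unsigned int i = 0, j = 0, n = 0;

                while (i < n_sta && j < n_uclk) {
                        if (sta[i] < opt_dcf_for_uclk[j]) {
                                /* next state comes from the STA target list */
                                out_dcf[n] = sta[i];
                                out_dram[n++] = opt_uclk_for_sta[i++];
                        } else if (opt_dcf_for_uclk[j] <= max_dcfclk) {
                                /* next state comes from the UCLK-derived list */
                                out_dcf[n] = opt_dcf_for_uclk[j];
                                out_dram[n++] = uclk_mts[j++];
                        } else {
                                break;  /* remaining UCLK states exceed max DCFCLK */
                        }
                }
                return n;
        }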
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
new file mode 100644 (file)
index 0000000..e2bcd20
--- /dev/null
@@ -0,0 +1,357 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "resource.h"
+#include "clk_mgr.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn302/dcn302_resource.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dcn302_fpu.h"
+
+struct _vcs_dpi_ip_params_st dcn3_02_ip = {
+               .use_min_dcfclk = 0,
+               .clamp_min_dcfclk = 0,
+               .odm_capable = 1,
+               .gpuvm_enable = 1,
+               .hostvm_enable = 0,
+               .gpuvm_max_page_table_levels = 4,
+               .hostvm_max_page_table_levels = 4,
+               .hostvm_cached_page_table_levels = 0,
+               .pte_group_size_bytes = 2048,
+               .num_dsc = 5,
+               .rob_buffer_size_kbytes = 184,
+               .det_buffer_size_kbytes = 184,
+               .dpte_buffer_size_in_pte_reqs_luma = 64,
+               .dpte_buffer_size_in_pte_reqs_chroma = 34,
+               .pde_proc_buffer_size_64k_reqs = 48,
+               .dpp_output_buffer_pixels = 2560,
+               .opp_output_buffer_lines = 1,
+               .pixel_chunk_size_kbytes = 8,
+               .pte_enable = 1,
+               .max_page_table_levels = 2,
+               .pte_chunk_size_kbytes = 2,  // ?
+               .meta_chunk_size_kbytes = 2,
+               .writeback_chunk_size_kbytes = 8,
+               .line_buffer_size_bits = 789504,
+               .is_line_buffer_bpp_fixed = 0,  // ?
+               .line_buffer_fixed_bpp = 0,     // ?
+               .dcc_supported = true,
+               .writeback_interface_buffer_size_kbytes = 90,
+               .writeback_line_buffer_buffer_size = 0,
+               .max_line_buffer_lines = 12,
+               .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
+               .writeback_chroma_buffer_size_kbytes = 8,
+               .writeback_chroma_line_buffer_width_pixels = 4,
+               .writeback_max_hscl_ratio = 1,
+               .writeback_max_vscl_ratio = 1,
+               .writeback_min_hscl_ratio = 1,
+               .writeback_min_vscl_ratio = 1,
+               .writeback_max_hscl_taps = 1,
+               .writeback_max_vscl_taps = 1,
+               .writeback_line_buffer_luma_buffer_size = 0,
+               .writeback_line_buffer_chroma_buffer_size = 14643,
+               .cursor_buffer_size = 8,
+               .cursor_chunk_size = 2,
+               .max_num_otg = 5,
+               .max_num_dpp = 5,
+               .max_num_wb = 1,
+               .max_dchub_pscl_bw_pix_per_clk = 4,
+               .max_pscl_lb_bw_pix_per_clk = 2,
+               .max_lb_vscl_bw_pix_per_clk = 4,
+               .max_vscl_hscl_bw_pix_per_clk = 4,
+               .max_hscl_ratio = 6,
+               .max_vscl_ratio = 6,
+               .hscl_mults = 4,
+               .vscl_mults = 4,
+               .max_hscl_taps = 8,
+               .max_vscl_taps = 8,
+               .dispclk_ramp_margin_percent = 1,
+               .underscan_factor = 1.11,
+               .min_vblank_lines = 32,
+               .dppclk_delay_subtotal = 46,
+               .dynamic_metadata_vm_enabled = true,
+               .dppclk_delay_scl_lb_only = 16,
+               .dppclk_delay_scl = 50,
+               .dppclk_delay_cnvc_formatter = 27,
+               .dppclk_delay_cnvc_cursor = 6,
+               .dispclk_delay_subtotal = 119,
+               .dcfclk_cstate_latency = 5.2, // SRExitTime
+               .max_inter_dcn_tile_repeaters = 8,
+               .max_num_hdmi_frl_outputs = 1,
+               .odm_combine_4to1_supported = true,
+
+               .xfc_supported = false,
+               .xfc_fill_bw_overhead_percent = 10.0,
+               .xfc_fill_constant_bytes = 0,
+               .gfx7_compat_tiling_supported = 0,
+               .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
+               .clock_limits = {
+                               {
+                                               .state = 0,
+                                               .dispclk_mhz = 562.0,
+                                               .dppclk_mhz = 300.0,
+                                               .phyclk_mhz = 300.0,
+                                               .phyclk_d18_mhz = 667.0,
+                                               .dscclk_mhz = 405.6,
+                               },
+               },
+
+               .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
+               .num_states = 1,
+               .sr_exit_time_us = 26.5,
+               .sr_enter_plus_exit_time_us = 31,
+               .urgent_latency_us = 4.0,
+               .urgent_latency_pixel_data_only_us = 4.0,
+               .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+               .urgent_latency_vm_data_only_us = 4.0,
+               .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+               .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+               .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+               .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+               .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+               .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+               .max_avg_sdp_bw_use_normal_percent = 60.0,
+               .max_avg_dram_bw_use_normal_percent = 40.0,
+               .writeback_latency_us = 12.0,
+               .max_request_size_bytes = 256,
+               .fabric_datapath_to_dcn_data_return_bytes = 64,
+               .dcn_downspread_percent = 0.5,
+               .downspread_percent = 0.38,
+               .dram_page_open_time_ns = 50.0,
+               .dram_rw_turnaround_time_ns = 17.5,
+               .dram_return_buffer_per_channel_bytes = 8192,
+               .round_trip_ping_latency_dcfclk_cycles = 156,
+               .urgent_out_of_order_return_per_channel_bytes = 4096,
+               .channel_interleave_bytes = 256,
+               .num_banks = 8,
+               .gpuvm_min_page_size_bytes = 4096,
+               .hostvm_min_page_size_bytes = 4096,
+               .dram_clock_change_latency_us = 404,
+               .dummy_pstate_latency_us = 5,
+               .writeback_dram_clock_change_latency_us = 23.0,
+               .return_bus_width_bytes = 64,
+               .dispclk_dppclk_vco_speed_mhz = 3650,
+               .xfc_bus_transport_time_us = 20,      // ?
+               .xfc_xbuf_latency_tolerance_us = 4,  // ?
+               .use_urgent_burst_bw = 1,            // ?
+               .do_urgent_latency_adjustment = true,
+               .urgent_latency_adjustment_fabric_clock_component_us = 1.0,
+               .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000,
+};
+
+static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts,
+       unsigned int *optimal_dcfclk,
+       unsigned int *optimal_fclk)
+{
+
+       double bw_from_dram, bw_from_dram1, bw_from_dram2;
+
+       bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans *
+               dcn3_02_soc.dram_channel_width_bytes *
+               (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100);
+       bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans *
+               dcn3_02_soc.dram_channel_width_bytes *
+               (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100);
+
+       bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2;
+
+       if (optimal_fclk)
+               *optimal_fclk = bw_from_dram /
+                       (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes *
+                        (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+
+       if (optimal_dcfclk)
+               *optimal_dcfclk = bw_from_dram /
+                       (dcn3_02_soc.return_bus_width_bytes *
+                        (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100));
+}
+
+void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+       unsigned int i, j;
+       unsigned int num_states = 0;
+
+       unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0};
+       unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0};
+       unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0};
+       unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0};
+
+       unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200};
+       unsigned int num_dcfclk_sta_targets = 4;
+       unsigned int num_uclk_states;
+
+       dc_assert_fp_enabled();
+
+       if (dc->ctx->dc_bios->vram_info.num_chans)
+               dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+
+       if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+               dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+
+       dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+       dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+       if (bw_params->clk_table.entries[0].memclk_mhz) {
+               int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0;
+
+               for (i = 0; i < MAX_NUM_DPM_LVL; i++) {
+                       if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz)
+                               max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
+                       if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz)
+                               max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz;
+                       if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz)
+                               max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz;
+                       if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz)
+                               max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz;
+               }
+               if (!max_dcfclk_mhz)
+                       max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz;
+               if (!max_dispclk_mhz)
+                       max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz;
+               if (!max_dppclk_mhz)
+                       max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz;
+               if (!max_phyclk_mhz)
+                       max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz;
+
+               if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+                       /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */
+                       dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz;
+                       num_dcfclk_sta_targets++;
+               } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) {
+                       /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */
+                       for (i = 0; i < num_dcfclk_sta_targets; i++) {
+                               if (dcfclk_sta_targets[i] > max_dcfclk_mhz) {
+                                       dcfclk_sta_targets[i] = max_dcfclk_mhz;
+                                       break;
+                               }
+                       }
+                       /* Update size of array since we "removed" duplicates */
+                       num_dcfclk_sta_targets = i + 1;
+               }
+
+               num_uclk_states = bw_params->clk_table.num_entries;
+
+               /* Calculate optimal dcfclk for each uclk */
+               for (i = 0; i < num_uclk_states; i++) {
+                       dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16,
+                                       &optimal_dcfclk_for_uclk[i], NULL);
+                       if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz)
+                               optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz;
+               }
+
+               /* Calculate optimal uclk for each dcfclk sta target */
+               for (i = 0; i < num_dcfclk_sta_targets; i++) {
+                       for (j = 0; j < num_uclk_states; j++) {
+                               if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+                                       optimal_uclk_for_dcfclk_sta_targets[i] =
+                                                       bw_params->clk_table.entries[j].memclk_mhz * 16;
+                                       break;
+                               }
+                       }
+               }
+
+               i = 0;
+               j = 0;
+               /* create the final dcfclk and uclk table */
+               while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
+                       if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
+                               dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+                               dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+                       } else {
+                               if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+                                       dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+                                       dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+                               } else {
+                                       j = num_uclk_states;
+                               }
+                       }
+               }
+
+               while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) {
+                       dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
+                       dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
+               }
+
+               while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES &&
+                               optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) {
+                       dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j];
+                       dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16;
+               }
+
+               dcn3_02_soc.num_states = num_states;
+               for (i = 0; i < dcn3_02_soc.num_states; i++) {
+                       dcn3_02_soc.clock_limits[i].state = i;
+                       dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i];
+                       dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i];
+                       dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i];
+
+                       /* Fill all states with max values of all other clocks */
+                       dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz;
+                       dcn3_02_soc.clock_limits[i].dppclk_mhz  = max_dppclk_mhz;
+                       dcn3_02_soc.clock_limits[i].phyclk_mhz  = max_phyclk_mhz;
+                       /* Populate from bw_params for DTBCLK, SOCCLK */
+                       if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0)
+                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz;
+                       else
+                               dcn3_02_soc.clock_limits[i].dtbclk_mhz  = bw_params->clk_table.entries[i].dtbclk_mhz;
+                       if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0)
+                               dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz;
+                       else
+                               dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz;
+                       /* These clocks cannot come from bw_params; always fill from dcn3_02_soc.clock_limits[0] */
+                       /* FCLK, PHYCLK_D18, DSCCLK */
+                       dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz;
+                       dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz;
+               }
+               /* re-init DML with updated bb */
+               dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+               if (dc->current_state)
+                       dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30);
+       }
+}
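The state-table construction above is easier to see in isolation. Below is a minimal, compilable sketch of the same two-pointer merge over ascending DCFCLK STA targets and per-UCLK optimal DCFCLKs; every array value is a made-up sample, not a real DPM level, and the SoC bookkeeping is stripped out:

    #include <stdio.h>

    #define N_STA  3
    #define N_UCLK 3

    int main(void)
    {
    	/* Ascending DCFCLK STA targets (MHz). */
    	unsigned int sta[N_STA] = { 694, 875, 1000 };
    	/* Per UCLK DPM level: the DCFCLK (MHz) that saturates it, and the
    	 * level's DRAM speed (MT/s). */
    	unsigned int opt_dcf[N_UCLK] = { 500, 800, 1100 };
    	unsigned int uclk_mts[N_UCLK] = { 8000, 12000, 16000 };
    	/* Per STA target: the lowest UCLK whose optimal DCFCLK exceeds the
    	 * target (precomputed, as in the driver loop above). */
    	unsigned int opt_uclk[N_STA] = { 12000, 16000, 16000 };
    	unsigned int i = 0, j = 0;

    	/* Merge both sorted sequences into one ascending DCFCLK state table. */
    	while (i < N_STA || j < N_UCLK) {
    		if (i < N_STA && (j == N_UCLK || sta[i] < opt_dcf[j])) {
    			printf("state: dcfclk %4u MHz, uclk %5u MT/s\n",
    			       sta[i], opt_uclk[i]);
    			i++;
    		} else {
    			printf("state: dcfclk %4u MHz, uclk %5u MT/s\n",
    			       opt_dcf[j], uclk_mts[j]);
    			j++;
    		}
    	}
    	return 0;
    }

On these sample values the merge emits states at 500, 694, 800, 875, 1000 and 1100 MHz, i.e. one monotonic table interleaving both sources, exactly what the driver then copies into dcn3_02_soc.clock_limits.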
+
+void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
+{
+       dc_assert_fp_enabled();
+
+       if (bb_info.dram_clock_change_latency_100ns > 0)
+               dcn3_02_soc.dram_clock_change_latency_us =
+                       bb_info.dram_clock_change_latency_100ns * 10;
+
+       if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+               dcn3_02_soc.sr_enter_plus_exit_time_us =
+                       bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+       if (bb_info.dram_sr_exit_latency_100ns > 0)
+               dcn3_02_soc.sr_exit_time_us =
+                       bb_info.dram_sr_exit_latency_100ns * 10;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h
new file mode 100644 (file)
index 0000000..548305d
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN302_FPU_H__
+#define __DCN302_FPU_H__
+
+void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
+void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+#endif /* __DCN302_FPU_H__*/
index 6feb23432f8dba86400c9d144d7308575ced09aa..e4b9fd31223c99b968bd6ad94cc5238110261408 100644 (file)
@@ -64,6 +64,8 @@ typedef struct {
        double DCFCLKDeepSleep;
        unsigned int DPPPerPlane;
        bool ScalerEnabled;
+       double VRatio;
+       double VRatioChroma;
        enum scan_direction_class SourceScan;
        unsigned int BlockWidth256BytesY;
        unsigned int BlockHeight256BytesY;
@@ -942,6 +944,7 @@ static bool CalculatePrefetchSchedule(
        double dst_y_prefetch_equ;
        double Tsw_oto;
        double prefetch_bw_oto;
+       double prefetch_bw_pr;
        double Tvm_oto;
        double Tr0_oto;
        double Tvm_oto_lines;
@@ -971,6 +974,7 @@ static bool CalculatePrefetchSchedule(
        double min_Lsw;
        double Tsw_est1 = 0;
        double Tsw_est3 = 0;
+       double max_Tsw = 0;
 
        if (GPUVMEnable == true && HostVMEnable == true) {
                HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
@@ -1111,11 +1115,14 @@ static bool CalculatePrefetchSchedule(
                bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4;
        else
                bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC;
-
+       /*rev 99*/
+       prefetch_bw_pr = dml_min(1, myPipe->VRatio) * (bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane);
+       max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
        prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC;
        prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime));
+       prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw);
 
-       min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
+       min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
        Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
        Tsw_oto = Lsw_oto * LineTime;
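Two details of this rev-99 change are worth spelling out. prefetch_bw_oto is now the larger of the per-plane pixel-rate bandwidth (prefetch_bw_pr) and the prefetch payload spread over the longest source-line time (prefetch_sw_bytes / max_Tsw). And min_Lsw is now floored at one line, so Lsw_oto can no longer collapse toward zero when few source lines are prefetched: for example, with PrefetchSourceLinesY = 2, PrefetchSourceLinesC = 0 and max_vratio_pre = 4, the old expression gave min_Lsw = 2 / 4 = 0.5, while the new one gives min_Lsw = max(1, 0.5) = 1, guaranteeing at least one full line in the oto schedule.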
 
@@ -1389,7 +1396,7 @@ static bool CalculatePrefetchSchedule(
                        dml_print("DML::%s: SwathHeightC = %d\n", __func__, SwathHeightC);
                        dml_print("DML::%s: VInitPreFillC = %f\n", __func__, VInitPreFillC);
 #endif
-                       if ((SwathHeightC > 4)) {
+                       if ((SwathHeightC > 4) || VInitPreFillC > 3) {
                                if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
                                        *VRatioPrefetchC = dml_max(
                                                        *VRatioPrefetchC,
@@ -2663,6 +2670,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
                        myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep;
                        myPipe.DPPPerPlane = v->DPPPerPlane[k];
                        myPipe.ScalerEnabled = v->ScalerEnabled[k];
+                       myPipe.VRatio = v->VRatio[k];
+                       myPipe.VRatioChroma = v->VRatioChroma[k];
                        myPipe.SourceScan = v->SourceScan[k];
                        myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k];
                        myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k];
@@ -3911,6 +3920,9 @@ static noinline void CalculatePrefetchSchedulePerPlane(
        myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j];
        myPipe.DPPPerPlane = v->NoOfDPP[i][j][k];
        myPipe.ScalerEnabled = v->ScalerEnabled[k];
+       myPipe.VRatio = v->VRatio[k];
+       myPipe.VRatioChroma = v->VRatioChroma[k];
+
        myPipe.SourceScan = v->SourceScan[k];
        myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k];
        myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k];
@@ -4987,6 +4999,17 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                                &v->meta_row_bandwidth[i][j][k],
                                                &v->dpte_row_bandwidth[i][j][k]);
                        }
+                       /* VBA, rev 99:  DCCMetaBufferSizeSupport(i, j) = True
+                        *   For k = 0 To NumberOfActivePlanes - 1
+                        *     If MetaRowBytes(i, j, k) > 24064 Then
+                        *       DCCMetaBufferSizeSupport(i, j) = False
+                        *     End If
+                        *   Next k */
+                       v->DCCMetaBufferSizeSupport[i][j] = true;
+                       for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+                               if (v->MetaRowBytes[i][j][k] > 24064)
+                                       v->DCCMetaBufferSizeSupport[i][j] = false;
+                       }
                        v->UrgLatency[i] = CalculateUrgentLatency(
                                        v->UrgentLatencyPixelDataOnly,
                                        v->UrgentLatencyPixelMixedWithVMData,
index 0fad15020c74011fa9334239cfab29ba3491623a..c0740dbdcc2e63d9b7cdc067b7b17ba7b52fcab6 100644 (file)
@@ -47,6 +47,7 @@ static void recalculate_params(
                unsigned int num_pipes);
 
 static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
+static void cache_debug_params(struct display_mode_lib *mode_lib);
 
 unsigned int dml_get_voltage_level(
                struct display_mode_lib *mode_lib,
@@ -73,6 +74,7 @@ unsigned int dml_get_voltage_level(
                PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
        }
        mode_lib->funcs.validate(mode_lib);
+       cache_debug_params(mode_lib);
 
        return mode_lib->vba.VoltageLevel;
 }
@@ -745,6 +747,28 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
        mode_lib->vba.HostVMEnable = mode_lib->vba.HostVMEnable && !!ip->hostvm_enable;
 }
 
+/**
+ * ********************************************************************************************
+ * cache_debug_params: Cache any params that need to be maintained from the initial validation
+ * for debug purposes.
+ *
+ * The DML getters can modify some of the VBA params that we are interested in (for example when
+ * calculating with dummy p-state latency), so cache any params here that we want for debugging.
+ *
+ * @param [in] mode_lib: mode_lib input/output of the validate call
+ *
+ * @return: void
+ *
+ * ********************************************************************************************
+ */
+static void cache_debug_params(struct display_mode_lib *mode_lib)
+{
+       int k = 0;
+
+       for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
+               mode_lib->vba.CachedActiveDRAMClockChangeLatencyMargin[k] = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+}
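A sketch of how a debug consumer might use the cached copy; the dump function and its placement are hypothetical, while dml_get_voltage_level, dml_print and CachedActiveDRAMClockChangeLatencyMargin are the pieces introduced or used by this patch:

    /* Hypothetical debug dump: report the margins produced by the *initial*
     * validation, before any DML getter has overwritten the live VBA copies. */
    static void dump_dram_change_margins(struct display_mode_lib *mode_lib,
    		const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
    {
    	unsigned int k;

    	/* Validates the config and snapshots margins via cache_debug_params(). */
    	dml_get_voltage_level(mode_lib, pipes, num_pipes);

    	for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++)
    		dml_print("DML::%s: cached margin[%u] = %f\n", __func__, k,
    				mode_lib->vba.CachedActiveDRAMClockChangeLatencyMargin[k]);
    }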
+
 // in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
 // rather than working them out as in recalculate_ms
 static void recalculate_params(
index 90e87961fe3ebb96146e5556686fd8ebc5975134..0603b32971a6ff50c181a7eca748f1d3aceda4df 100644 (file)
@@ -195,6 +195,7 @@ struct vba_vars_st {
        unsigned int LBLatencyHidingSourceLinesY;
        unsigned int LBLatencyHidingSourceLinesC;
        double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
+       double CachedActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; // Cache in dml_get_voltage_level for debug purposes only
        double MinActiveDRAMClockChangeMargin;
        double InitFillLevel;
        double FinalFillMargin;
@@ -544,6 +545,8 @@ struct vba_vars_st {
        bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES];
        double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES];
        bool ROBSupport[DC__VOLTAGE_STATES][2];
+       // based on rev 99: Dim DCCMetaBufferSizeSupport(NumberOfStates, 1) As Boolean
+       bool DCCMetaBufferSizeSupport[DC__VOLTAGE_STATES][2];
        bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES][2];
        bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES][2];
        double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES][2];
index ec636d06e18c721115d61ed8885edc7b36dabea8..ef75eb7d5adc3eb02f4f00a12860c2098e1a534a 100644 (file)
@@ -68,7 +68,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
        int sel = table_hash(mode, bpc, max_min);
        int table_size = 0;
        int index;
-       const struct qp_entry *table = 0L;
+       const struct qp_entry *table = NULL;
 
        // alias enum
        enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
index 5f6ae3edb755bdeca0d1f4350263509e78d1a93b..3b7df1ac26be3e7e64828c40ba8903db0d811b54 100644 (file)
@@ -42,8 +42,8 @@
 
 #include "nbio/nbio_7_4_offset.h"
 
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 
 #include "mmhub/mmhub_2_0_0_offset.h"
 #include "mmhub/mmhub_2_0_0_sh_mask.h"
index 0046219a1cc7c154eb0c696db44afba728bc5257..6b6b7c7bd12f365101bacc7f8f2c89c5f1bfe41b 100644 (file)
@@ -40,8 +40,8 @@
 
 #include "nbio/nbio_7_4_offset.h"
 
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 
 #include "mmhub/mmhub_2_0_0_offset.h"
 #include "mmhub/mmhub_2_0_0_sh_mask.h"
index 943240e2809e95cff2d1a9d4e9a86d9ec57f929f..951c9b60917d08bfc5b650da98bde5d149042fc0 100644 (file)
@@ -54,6 +54,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 #include "dm_cp_psp.h"
 #endif
+#include "link_hwss.h"
 
 /************ link *****************/
 struct link_init_data {
@@ -249,12 +250,10 @@ struct resource_pool {
        /* Number of USB4 DPIA (DisplayPort Input Adapter) link objects created.*/
        unsigned int usb4_dpia_count;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        unsigned int hpo_dp_stream_enc_count;
        struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS];
        unsigned int hpo_dp_link_enc_count;
        struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
-#endif
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dc_3dlut *mpc_lut[MAX_PIPES];
        struct dc_transfer_func *mpc_shaper[MAX_PIPES];
@@ -307,9 +306,7 @@ struct stream_resource {
        struct display_stream_compressor *dsc;
        struct timing_generator *tg;
        struct stream_encoder *stream_enc;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
-#endif
        struct audio *audio;
 
        struct pixel_clk_params pix_clk_params;
@@ -334,18 +331,12 @@ struct plane_resource {
        struct dcn_fe_bandwidth bw;
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF
 #define LINK_RES_HPO_DP_REC_MAP__SHIFT 0
-#endif
 
 /* all mappable hardware resources used to enable a link */
 struct link_resource {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_link_encoder *hpo_dp_link_enc;
-#else
-       void *dummy;
-#endif
 };
 
 union pipe_update_flags {
@@ -425,11 +416,9 @@ struct resource_context {
        uint8_t dp_clock_source_ref_count;
        bool is_dsc_acquired[MAX_PIPES];
        struct link_enc_cfg_context link_enc_cfg_ctx;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
        unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
        int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
-#endif
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        bool is_mpc_3dlut_acquired[MAX_PIPES];
 #endif
index cd52813a84329fba533f63e9055555aad16a2a44..477c4d9a972fa9fc5e15faf40a96ce42c3a91d16 100644 (file)
@@ -54,22 +54,13 @@ enum {
        PEAK_FACTOR_X1000 = 1006,
 };
 
-bool dp_verify_link_cap(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       struct dc_link_settings *known_limit_link_setting,
-       int *fail_count);
+struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
 
 bool dp_verify_link_cap_with_retries(
        struct dc_link *link,
-       const struct link_resource *link_res,
        struct dc_link_settings *known_limit_link_setting,
        int attempts);
 
-bool dp_verify_mst_link_cap(
-       struct dc_link *link,
-       const struct link_resource *link_res);
-
 bool dp_validate_mode_timing(
        struct dc_link *link,
        const struct dc_crtc_timing *timing);
@@ -114,6 +105,9 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
 bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
 
 void dpcd_set_source_specific_data(struct dc_link *link);
+
+void dpcd_update_cable_id(struct dc_link *link);
+
 /* Write DPCD link configuration data. */
 enum dc_status dpcd_set_link_settings(
        struct dc_link *link,
@@ -173,7 +167,6 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
 
 enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);
 void dp_set_fec_enable(struct dc_link *link, bool enable);
-struct link_encoder *dp_get_link_enc(struct dc_link *link);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
 void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
@@ -222,8 +215,47 @@ void disable_dp_hpo_output(struct dc_link *link,
                enum signal_type signal);
 void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
 bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);
 
 bool dp_retrieve_lttpr_cap(struct dc_link *link);
 void edp_panel_backlight_power_on(struct dc_link *link);
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
+void dp_enable_link_phy(
+       struct dc_link *link,
+       const struct link_resource *link_res,
+       enum signal_type signal,
+       enum clock_source_id clock_source,
+       const struct dc_link_settings *link_settings);
+void edp_add_delay_for_T9(struct dc_link *link);
+bool edp_receiver_ready_T9(struct dc_link *link);
+bool edp_receiver_ready_T7(struct dc_link *link);
+
+void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
+               enum signal_type signal);
+
+void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
+               enum signal_type signal);
+
+bool dp_set_hw_training_pattern(
+               struct dc_link *link,
+               const struct link_resource *link_res,
+               enum dc_dp_training_pattern pattern,
+               uint32_t offset);
+
+void dp_set_hw_lane_settings(
+               struct dc_link *link,
+               const struct link_resource *link_res,
+               const struct link_training_settings *link_settings,
+               uint32_t offset);
+
+void dp_set_hw_test_pattern(
+               struct dc_link *link,
+               const struct link_resource *link_res,
+               enum dp_test_pattern test_pattern,
+               uint8_t *custom_pattern,
+               uint32_t custom_pattern_size);
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+               struct dc_link_settings *link_setting,
+               bool skip_video_pattern);
 #endif /* __DC_LINK_DP_H__ */
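For orientation, a sketch of how a detection path might drive the reworked capability check; the wrapper function and retry count are hypothetical, while dp_get_max_link_cap and dp_verify_link_cap_with_retries are the prototypes declared above, now taking no link_res:

    /* Hypothetical detection-time helper: start from the theoretical maximum
     * and let the retry loop train down to what the sink and cable can hold. */
    static void verify_reported_caps(struct dc_link *link)
    {
    	struct dc_link_settings max_setting = dp_get_max_link_cap(link);

    	/* A retry budget of 2 is assumed here for illustration only. */
    	if (!dp_verify_link_cap_with_retries(link, &max_setting, 2))
    		DC_LOG_WARNING("%s: link %d: no link settings verified\n",
    				__func__, link->link_index);
    }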
index c940fdfda144286a36d040b4026e5d5b22226ad6..b2fa4de47734d3e436d219d27af6647abefdec94 100644 (file)
@@ -79,7 +79,7 @@ struct dccg_funcs {
        void (*otg_drop_pixel)(struct dccg *dccg,
                        uint32_t otg_inst);
        void (*dccg_init)(struct dccg *dccg);
-#if defined(CONFIG_DRM_AMD_DC_DCN)
+
        void (*set_dpstreamclk)(
                        struct dccg *dccg,
                        enum hdmistreamclk_source src,
@@ -102,7 +102,7 @@ struct dccg_funcs {
        void (*disable_symclk32_le)(
                        struct dccg *dccg,
                        int hpo_le_inst);
-#endif
+
        void (*set_physymclk)(
                        struct dccg *dccg,
                        int phy_inst,
index 10ecbc667ffa47604e1d8ef0377ad25bc6aa3455..d89bd55f110feadb3f3e4563e6b532c14509d157 100644 (file)
 #define MAX_PIPES 6
 #define MAX_DIG_LINK_ENCODERS 7
 #define MAX_DWB_PIPES  1
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #define MAX_HPO_DP2_ENCODERS   4
 #define MAX_HPO_DP2_LINK_ENCODERS      2
-#endif
 
 struct gamma_curve {
        uint32_t offset;
index 2ce15cd10d80fd797991519900f4f583c129deec..2013a70603aedbf06299be449eebb604089a7997 100644 (file)
@@ -162,7 +162,8 @@ struct link_encoder_funcs {
        void (*disable_output)(struct link_encoder *link_enc,
                enum signal_type signal);
        void (*dp_set_lane_settings)(struct link_encoder *enc,
-               const struct link_training_settings *link_settings);
+               const struct dc_link_settings *link_settings,
+               const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
        void (*dp_set_phy_pattern)(struct link_encoder *enc,
                const struct encoder_set_dp_phy_pattern_param *para);
        void (*update_mst_stream_allocation_table)(
@@ -220,7 +221,6 @@ enum link_enc_cfg_mode {
        LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. */
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 enum dp2_link_mode {
        DP2_LINK_TRAINING_TPS1,
        DP2_LINK_TRAINING_TPS2,
@@ -306,6 +306,5 @@ struct hpo_dp_link_encoder_funcs {
                const struct dc_link_settings *link_settings,
                uint8_t ffe_preset);
 };
-#endif
 
 #endif /* LINK_ENCODER_H_ */
index 073f8b667eff56ab3249d2214e94a9a88b25604c..678c2065e5e88fdaeac6b68f33f67d61087b7ed7 100644 (file)
@@ -164,10 +164,6 @@ struct stream_encoder_funcs {
        void (*stop_dp_info_packets)(
                struct stream_encoder *enc);
 
-       void (*reset_fifo)(
-               struct stream_encoder *enc
-       );
-
        void (*dp_blank)(
                struct dc_link *link,
                struct stream_encoder *enc);
@@ -249,7 +245,6 @@ struct stream_encoder_funcs {
                struct stream_encoder *enc);
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 struct hpo_dp_stream_encoder_state {
        uint32_t stream_enc_enabled;
        uint32_t vid_stream_enabled;
@@ -328,7 +323,10 @@ struct hpo_dp_stream_encoder_funcs {
        void (*read_state)(
                        struct hpo_dp_stream_encoder *enc,
                        struct hpo_dp_stream_encoder_state *state);
+
+       void (*set_hblank_min_symbol_width)(
+                       struct hpo_dp_stream_encoder *enc,
+                       uint16_t width);
 };
-#endif
 
 #endif /* STREAM_ENCODER_H_ */
index c29320b3855da4da5ed5403e48fa0bfa32cced0e..59a704781e3490af8dd86f2b9e0f234337a7faf8 100644 (file)
@@ -100,9 +100,7 @@ enum crc_selection {
 
 enum otg_out_mux_dest {
        OUT_MUX_DIO = 0,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        OUT_MUX_HPO_DP = 2,
-#endif
 };
 
 enum h_timing_div_mode {
index c2008258c50a5501b7bb98d5e5abdb58056ba008..280c8764b6369a79e5c5da274ae29691883d737c 100644 (file)
@@ -41,9 +41,7 @@ struct dce_hwseq_wa {
        bool DEGVIDCN10_254;
        bool DEGVIDCN21;
        bool disallow_self_refresh_during_multi_plane_transition;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool dp_hpo_and_otg_sequence;
-#endif
 };
 
 struct hwseq_wa_state {
index 3f12b1600d2afc3a648b5b0abc46110bd6f6623f..d561f86d503ce1263697d07a0a25e9d2e7f30f8e 100644 (file)
@@ -27,7 +27,7 @@
 #define __LINK_DPCD_H__
 #include <inc/core_status.h>
 #include <dc_link.h>
-#include <inc/link_hwss.h>
+#include <dc_link_dp.h>
 
 enum dc_status core_link_read_dpcd(
                struct dc_link *link,
index a4e43b4826e0e7b9fde11d2c4091bb9f71fe5fbf..c2f08514a1d9ff655d0e787b001f2d34023cc056 100644 (file)
@@ -96,6 +96,9 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
                struct dc *dc,
                const struct dc_stream_state *stream);
 
+/* Return DIG link encoder. NULL if unused. */
+struct link_encoder *link_enc_cfg_get_link_enc(const struct dc_link *link);
+
 /* Return true if encoder available to use. */
 bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link);
 
index 69d63763a10e185c3a030f1a5c6a69530c47da05..3b3090e3d327626e18ecafd81bd059a999f3c71b 100644 (file)
 #ifndef __DC_LINK_HWSS_H__
 #define __DC_LINK_HWSS_H__
 
-struct gpio *get_hpd_gpio(struct dc_bios *dcb,
-               struct graphics_object_id link_id,
-               struct gpio_service *gpio_service);
+/* include basic type headers only */
+#include "dc_dp_types.h"
+#include "signal_types.h"
+#include "grph_object_id.h"
+#include "fixed31_32.h"
 
-void dp_enable_link_phy(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       enum signal_type signal,
-       enum clock_source_id clock_source,
-       const struct dc_link_settings *link_settings);
+/* forward declare dc core types */
+struct dc_link;
+struct link_resource;
+struct pipe_ctx;
+struct encoder_set_dp_phy_pattern_param;
 
-void dp_receiver_power_ctrl(struct dc_link *link, bool on);
-void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
-void edp_add_delay_for_T9(struct dc_link *link);
-bool edp_receiver_ready_T9(struct dc_link *link);
-bool edp_receiver_ready_T7(struct dc_link *link);
+struct link_hwss_ext {
+       /* function pointers below require a NULL check at all times
+        * *********************************************************************
+        */
+       void (*set_hblank_min_symbol_width)(struct pipe_ctx *pipe_ctx,
+                       const struct dc_link_settings *link_settings,
+                       struct fixed31_32 throttled_vcp_size);
+       void (*set_throttled_vcp_size)(struct pipe_ctx *pipe_ctx,
+                       struct fixed31_32 throttled_vcp_size);
+       void (*enable_dp_link_output)(struct dc_link *link,
+                       const struct link_resource *link_res,
+                       enum signal_type signal,
+                       enum clock_source_id clock_source,
+                       const struct dc_link_settings *link_settings);
+       void (*disable_dp_link_output)(struct dc_link *link,
+                       const struct link_resource *link_res,
+                       enum signal_type signal);
+       void (*set_dp_link_test_pattern)(struct dc_link *link,
+                       const struct link_resource *link_res,
+                       struct encoder_set_dp_phy_pattern_param *tp_params);
+       void (*set_dp_lane_settings)(struct dc_link *link,
+               const struct link_resource *link_res,
+               const struct dc_link_settings *link_settings,
+               const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+};
 
-void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
-               enum signal_type signal);
-
-void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
-               enum signal_type signal);
-
-bool dp_set_hw_training_pattern(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       enum dc_dp_training_pattern pattern,
-       uint32_t offset);
-
-void dp_set_hw_lane_settings(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       const struct link_training_settings *link_settings,
-       uint32_t offset);
-
-void dp_set_hw_test_pattern(
-       struct dc_link *link,
-       const struct link_resource *link_res,
-       enum dp_test_pattern test_pattern,
-       uint8_t *custom_pattern,
-       uint32_t custom_pattern_size);
-
-void dp_retrain_link_dp_test(struct dc_link *link,
-               struct dc_link_settings *link_setting,
-               bool skip_video_pattern);
+struct link_hwss {
+       struct link_hwss_ext ext;
 
+       /* function pointers below MUST be assigned for all types of link_hwss
+        * *********************************************************************
+        */
+       void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
+       void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
+};
 #endif /* __DC_LINK_HWSS_H__ */
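Everything in link_hwss_ext is optional, so callers are expected to fetch the table through get_link_hwss() (declared in resource.h further down in this series) and guard each ext hook. A minimal sketch of the intended calling pattern, with the surrounding plumbing assumed:

    /* Minimal calling pattern for the new vtable; error handling elided. */
    static void throttle_vcp_sketch(struct pipe_ctx *pipe_ctx,
    		const struct link_resource *link_res,
    		struct fixed31_32 throttled_vcp_size)
    {
    	const struct link_hwss *link_hwss =
    			get_link_hwss(pipe_ctx->stream->link, link_res);

    	/* ext hooks may be NULL (e.g. no HPO support on this link); the
    	 * comment in the struct requires a check at every call site. */
    	if (link_hwss->ext.set_throttled_vcp_size)
    		link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
    				throttled_vcp_size);
    }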
index 2470405e996bc5b9f1b1008233467194714fa769..a402df225a76e9d0bb4408c5fd843041542f5306 100644 (file)
@@ -498,6 +498,40 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
                uint8_t shift1, uint32_t mask1, uint32_t field_value1,
                ...);
 
+/* indirect register access
+ * underlying implementation determines which index/data pair to be used
+ * in a synchronous way
+ */
+#define IX_REG_SET_N_SYNC(index, n, initial_val, ...)  \
+               generic_indirect_reg_update_ex_sync(CTX, \
+                               IND_REG(index), \
+                               initial_val, \
+                               n, __VA_ARGS__)
+
+#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2)   \
+               IX_REG_SET_N_SYNC(index, 2, init_value, \
+                               FN(reg, f1), v1,\
+                               FN(reg, f2), v2)
+
+#define IX_REG_GET_N_SYNC(index, n, ...) \
+               generic_indirect_reg_get_sync(CTX, \
+                               IND_REG(index), \
+                               n, __VA_ARGS__)
+
+#define IX_REG_GET_SYNC(index, field, val) \
+               IX_REG_GET_N_SYNC(index, 1, \
+                               FN(data_reg_name, field), val)
+
+uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
+               uint32_t index, int n,
+               uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+               ...);
+
+uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
+               uint32_t index, uint32_t reg_val, int n,
+               uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+               ...);
+
 /* register offload macros
  *
  * instead of MMIO to register directly, in some cases we want
index dbfe6690ded8a7353b26960067780225724b2c81..2369f38ed06f1b95079205f64d1d4524c1846d7b 100644 (file)
@@ -54,10 +54,8 @@ struct resource_caps {
        int num_dsc;
        unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output).
        unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters).
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        int num_hpo_dp_stream_encoder;
        int num_hpo_dp_link_encoder;
-#endif
        int num_mpc_3dlut;
 };
 
@@ -77,14 +75,12 @@ struct resource_create_funcs {
        struct stream_encoder *(*create_stream_encoder)(
                        enum engine_id eng_id, struct dc_context *ctx);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)(
                        enum engine_id eng_id, struct dc_context *ctx);
 
        struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)(
                        uint8_t inst,
                        struct dc_context *ctx);
-#endif
 
        struct dce_hwseq *(*create_hwseq)(
                        struct dc_context *ctx);
@@ -205,12 +201,9 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);
 
 int get_num_odm_splits(struct pipe_ctx *pipe);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
-               const struct resource_context *res_ctx,
-               const struct resource_pool *pool,
-               const struct dc_link *link);
-#endif
+bool get_temp_dp_link_res(struct dc_link *link,
+               struct link_resource *link_res,
+               struct dc_link_settings *link_settings);
 
 void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
        struct dc_state *context);
@@ -221,4 +214,7 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
 
 uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
 
+const struct link_hwss *get_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res);
+
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
index 914ce2ce1c2fcd0aa2e05a88058fd5192563c280..0b68c08fac3f0877c4c6f7b3da5d081bc0ac6629 100644 (file)
@@ -37,8 +37,8 @@
 
 #include "nbio/nbio_7_4_offset.h"
 
-#include "dcn/dpcs_3_0_0_offset.h"
-#include "dcn/dpcs_3_0_0_sh_mask.h"
+#include "dpcs/dpcs_3_0_0_offset.h"
+#include "dpcs/dpcs_3_0_0_sh_mask.h"
 
 #include "mmhub/mmhub_2_0_0_offset.h"
 #include "mmhub/mmhub_2_0_0_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
new file mode 100644 (file)
index 0000000..c4a69ba
--- /dev/null
@@ -0,0 +1,30 @@
+#
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the link sub-component of DAL.
+# It abstracts the control and status of back-end pipe components such as
+# DIO, HPO, DPIA, PHY, HPD and DDC.
+
+LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o
+
+AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
new file mode 100644 (file)
index 0000000..0f84511
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_dio.h"
+#include "core_types.h"
+#include "dc_link_dp.h"
+#include "link_enc_cfg.h"
+
+void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+               struct fixed31_32 throttled_vcp_size)
+{
+       struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+
+       stream_encoder->funcs->set_throttled_vcp_size(
+                               stream_encoder,
+                               throttled_vcp_size);
+}
+
+void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
+{
+       struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+
+       link_enc->funcs->connect_dig_be_to_fe(link_enc,
+                       pipe_ctx->stream_res.stream_enc->id, true);
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               dp_source_sequence_trace(pipe_ctx->stream->link,
+                               DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+}
+
+void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
+{
+       struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+
+       link_enc->funcs->connect_dig_be_to_fe(
+                       link_enc,
+                       pipe_ctx->stream_res.stream_enc->id,
+                       false);
+       if (dc_is_dp_signal(pipe_ctx->stream->signal))
+               dp_source_sequence_trace(pipe_ctx->stream->link,
+                               DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
+}
+
+void enable_dio_dp_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal,
+               enum clock_source_id clock_source,
+               const struct dc_link_settings *link_settings)
+{
+       struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+       if (dc_is_dp_sst_signal(signal))
+               link_enc->funcs->enable_dp_output(
+                               link_enc,
+                               link_settings,
+                               clock_source);
+       else
+               link_enc->funcs->enable_dp_mst_output(
+                               link_enc,
+                               link_settings,
+                               clock_source);
+       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+}
+
+void disable_dio_dp_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal)
+{
+       struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+       link_enc->funcs->disable_output(link_enc, signal);
+       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+}
+
+void set_dio_dp_link_test_pattern(struct dc_link *link,
+               const struct link_resource *link_res,
+               struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+       struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+       link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
+       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+void set_dio_dp_lane_settings(struct dc_link *link,
+               const struct link_resource *link_res,
+               const struct dc_link_settings *link_settings,
+               const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+       struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+       link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
+}
+
+static const struct link_hwss dio_link_hwss = {
+       .setup_stream_encoder = setup_dio_stream_encoder,
+       .reset_stream_encoder = reset_dio_stream_encoder,
+       .ext = {
+               .set_throttled_vcp_size = set_dio_throttled_vcp_size,
+               .enable_dp_link_output = enable_dio_dp_link_output,
+               .disable_dp_link_output = disable_dio_dp_link_output,
+               .set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
+               .set_dp_lane_settings = set_dio_dp_lane_settings,
+       },
+};
+
+bool can_use_dio_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res)
+{
+       return link->link_enc != NULL;
+}
+
+const struct link_hwss *get_dio_link_hwss(void)
+{
+       return &dio_link_hwss;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
new file mode 100644 (file)
index 0000000..680df20
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_DIO_H__
+#define __LINK_HWSS_DIO_H__
+
+#include "link_hwss.h"
+
+const struct link_hwss *get_dio_link_hwss(void);
+bool can_use_dio_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res);
+void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+               struct fixed31_32 throttled_vcp_size);
+void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx);
+void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx);
+void enable_dio_dp_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal,
+               enum clock_source_id clock_source,
+               const struct dc_link_settings *link_settings);
+void disable_dio_dp_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal);
+void set_dio_dp_link_test_pattern(struct dc_link *link,
+               const struct link_resource *link_res,
+               struct encoder_set_dp_phy_pattern_param *tp_params);
+void set_dio_dp_lane_settings(struct dc_link *link,
+               const struct link_resource *link_res,
+               const struct dc_link_settings *link_settings,
+               const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+
+#endif /* __LINK_HWSS_DIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
new file mode 100644 (file)
index 0000000..35b2062
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_dpia.h"
+#include "core_types.h"
+#include "link_hwss_dio.h"
+
+static const struct link_hwss dpia_link_hwss = {
+       .setup_stream_encoder = setup_dio_stream_encoder,
+       .reset_stream_encoder = reset_dio_stream_encoder,
+       .ext = {
+               .set_throttled_vcp_size = set_dio_throttled_vcp_size,
+               .enable_dp_link_output = enable_dio_dp_link_output,
+               .disable_dp_link_output = disable_dio_dp_link_output,
+               .set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
+               .set_dp_lane_settings = set_dio_dp_lane_settings,
+       },
+};
+
+bool can_use_dpia_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res)
+{
+       return link->is_dig_mapping_flexible &&
+                       link->dc->res_pool->funcs->link_encs_assign;
+}
+
+const struct link_hwss *get_dpia_link_hwss(void)
+{
+       return &dpia_link_hwss;
+}
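Taken together with the DIO predicates above, this suggests how get_link_hwss() (declared in resource.h earlier) can choose a table. A plausible selection order is sketched below; the HPO predicate and getter names are assumptions (the HPO file follows), and the real implementation may gate these checks differently:

    /* Sketch only: most specific backend first, DIO last as the fallback. */
    const struct link_hwss *get_link_hwss(const struct dc_link *link,
    		const struct link_resource *link_res)
    {
    	if (can_use_hpo_dp_link_hwss(link, link_res))	/* assumed predicate */
    		return get_hpo_dp_link_hwss();		/* assumed getter */
    	else if (can_use_dpia_link_hwss(link, link_res))
    		return get_dpia_link_hwss();
    	else if (can_use_dio_link_hwss(link, link_res))
    		return get_dio_link_hwss();

    	return NULL;
    }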
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h
new file mode 100644 (file)
index 0000000..ad16ec5
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_DPIA_H__
+#define __LINK_HWSS_DPIA_H__
+
+#include "link_hwss.h"
+
+const struct link_hwss *get_dpia_link_hwss(void);
+bool can_use_dpia_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res);
+
+#endif /* __LINK_HWSS_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
new file mode 100644 (file)
index 0000000..7491949
--- /dev/null
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_hpo_dp.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+#include "dccg.h"
+#include "dc_link_dp.h"
+
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+       switch (link->link_enc->transmitter) {
+       case TRANSMITTER_UNIPHY_A:
+               return PHYD32CLKA;
+       case TRANSMITTER_UNIPHY_B:
+               return PHYD32CLKB;
+       case TRANSMITTER_UNIPHY_C:
+               return PHYD32CLKC;
+       case TRANSMITTER_UNIPHY_D:
+               return PHYD32CLKD;
+       case TRANSMITTER_UNIPHY_E:
+               return PHYD32CLKE;
+       default:
+               return PHYD32CLKA;
+       }
+}
+
+static void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
+               struct fixed31_32 throttled_vcp_size)
+{
+       struct hpo_dp_stream_encoder *hpo_dp_stream_encoder =
+                       pipe_ctx->stream_res.hpo_dp_stream_enc;
+       struct hpo_dp_link_encoder *hpo_dp_link_encoder =
+                       pipe_ctx->link_res.hpo_dp_link_enc;
+
+       hpo_dp_link_encoder->funcs->set_throttled_vcp_size(hpo_dp_link_encoder,
+                       hpo_dp_stream_encoder->inst,
+                       throttled_vcp_size);
+}
+
+static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
+               const struct dc_link_settings *link_settings,
+               struct fixed31_32 throttled_vcp_size)
+{
+       struct hpo_dp_stream_encoder *hpo_dp_stream_encoder =
+                       pipe_ctx->stream_res.hpo_dp_stream_enc;
+       struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+       struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank;
+       uint32_t link_bw_in_kbps =
+                       dc_link_bandwidth_kbps(pipe_ctx->stream->link, link_settings);
+       uint16_t hblank_min_symbol_width = 0;
+
+       if (link_bw_in_kbps > 0) {
+               h_blank_in_ms = dc_fixpt_div(dc_fixpt_from_int(
+                               timing->h_total - timing->h_addressable),
+                               dc_fixpt_from_fraction(timing->pix_clk_100hz, 10));
+               time_slot_in_ms = dc_fixpt_from_fraction(32 * 4, link_bw_in_kbps);
+               mtp_cnt_per_h_blank = dc_fixpt_div(h_blank_in_ms,
+                               dc_fixpt_mul_int(time_slot_in_ms, 64));
+               hblank_min_symbol_width = dc_fixpt_floor(
+                               dc_fixpt_mul(mtp_cnt_per_h_blank, throttled_vcp_size));
+       }
+
+       hpo_dp_stream_encoder->funcs->set_hblank_min_symbol_width(hpo_dp_stream_encoder,
+                       hblank_min_symbol_width);
+}
+
+static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
+{
+       struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+       int count = 1;
+
+       while (odm_pipe != NULL) {
+               count++;
+               odm_pipe = odm_pipe->next_odm_pipe;
+       }
+
+       return count;
+}
+
+static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+       struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
+       struct dccg *dccg = dc->res_pool->dccg;
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
+       int odm_segment_count = get_odm_segment_count(pipe_ctx);
+       enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
+
+       dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst);
+       dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
+       dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk,
+                       odm_segment_count,
+                       &pipe_ctx->stream->timing);
+       stream_enc->funcs->enable_stream(stream_enc);
+       stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
+}
+
+static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
+{
+       struct dc *dc = pipe_ctx->stream->ctx->dc;
+       struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+       struct dccg *dccg = dc->res_pool->dccg;
+       struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+       stream_enc->funcs->disable(stream_enc);
+       dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing);
+       dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
+       dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst);
+}
+
+static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal,
+               enum clock_source_id clock_source,
+               const struct dc_link_settings *link_settings)
+{
+       const struct dc *dc = link->dc;
+       enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(link);
+       int phyd32clk_freq_khz = link_settings->link_rate == LINK_RATE_UHBR10 ? 312500 :
+                       link_settings->link_rate == LINK_RATE_UHBR13_5 ? 412875 :
+                       link_settings->link_rate == LINK_RATE_UHBR20 ? 625000 : 0;
+
+       dm_set_phyd32clk(dc->ctx, phyd32clk_freq_khz);
+       dc->res_pool->dccg->funcs->set_physymclk(
+                       dc->res_pool->dccg,
+                       link->link_enc_hw_inst,
+                       PHYSYMCLK_FORCE_SRC_PHYD32CLK,
+                       true);
+       dc->res_pool->dccg->funcs->enable_symclk32_le(
+                       dc->res_pool->dccg,
+                       link_res->hpo_dp_link_enc->inst,
+                       phyd32clk);
+       link_res->hpo_dp_link_enc->funcs->link_enable(
+                       link_res->hpo_dp_link_enc,
+                       link_settings->lane_count);
+}
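A hedged reading of the frequency table: PHYD32CLK appears to run at the per-lane bit rate divided by 32 (hence the "d32") -- UHBR10: 10,000,000 kb/s / 32 = 312,500 kHz; UHBR20: 20,000,000 kb/s / 32 = 625,000 kHz -- with 412,875 kHz as the table's UHBR13.5 value.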
+
+static void enable_hpo_dp_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal,
+               enum clock_source_id clock_source,
+               const struct dc_link_settings *link_settings)
+{
+       if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment))
+               enable_hpo_dp_fpga_link_output(link, link_res, signal,
+                               clock_source, link_settings);
+       else
+               link_res->hpo_dp_link_enc->funcs->enable_link_phy(
+                               link_res->hpo_dp_link_enc,
+                               link_settings,
+                               link->link_enc->transmitter,
+                               link->link_enc->hpd_source);
+}
+
+static void disable_hpo_dp_fpga_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal)
+{
+       const struct dc *dc = link->dc;
+
+       link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
+       dc->res_pool->dccg->funcs->disable_symclk32_le(
+                       dc->res_pool->dccg,
+                       link_res->hpo_dp_link_enc->inst);
+       dc->res_pool->dccg->funcs->set_physymclk(
+                       dc->res_pool->dccg,
+                       link->link_enc_hw_inst,
+                       PHYSYMCLK_FORCE_SRC_SYMCLK,
+                       false);
+       dm_set_phyd32clk(dc->ctx, 0);
+}
+
+static void disable_hpo_dp_link_output(struct dc_link *link,
+               const struct link_resource *link_res,
+               enum signal_type signal)
+{
+       if (IS_FPGA_MAXIMUS_DC(link->dc->ctx->dce_environment)) {
+               disable_hpo_dp_fpga_link_output(link, link_res, signal);
+       } else {
+               link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);
+               link_res->hpo_dp_link_enc->funcs->disable_link_phy(
+                               link_res->hpo_dp_link_enc, signal);
+       }
+}
+
+static void set_hpo_dp_link_test_pattern(struct dc_link *link,
+               const struct link_resource *link_res,
+               struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+       link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
+                       link_res->hpo_dp_link_enc, tp_params);
+       dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+}
+
+static void set_hpo_dp_lane_settings(struct dc_link *link,
+               const struct link_resource *link_res,
+               const struct dc_link_settings *link_settings,
+               const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+       link_res->hpo_dp_link_enc->funcs->set_ffe(
+                       link_res->hpo_dp_link_enc,
+                       link_settings,
+                       lane_settings[0].FFE_PRESET.raw);
+}
+
+static const struct link_hwss hpo_dp_link_hwss = {
+       .setup_stream_encoder = setup_hpo_dp_stream_encoder,
+       .reset_stream_encoder = reset_hpo_dp_stream_encoder,
+       .ext = {
+               .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
+               .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
+               .enable_dp_link_output = enable_hpo_dp_link_output,
+               .disable_dp_link_output = disable_hpo_dp_link_output,
+               .set_dp_link_test_pattern  = set_hpo_dp_link_test_pattern,
+               .set_dp_lane_settings = set_hpo_dp_lane_settings,
+       },
+};
+
+bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res)
+{
+       return link_res->hpo_dp_link_enc != NULL;
+}
+
+const struct link_hwss *get_hpo_dp_link_hwss(void)
+{
+       return &hpo_dp_link_hwss;
+}
+
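A hedged sketch of how a caller might pair the capability check with the accessor (the fallback to the virtual implementation is an assumption, modeled on get_virtual_link_hwss() further below):

    const struct link_hwss *hwss;

    if (can_use_hpo_dp_link_hwss(link, link_res))
            hwss = get_hpo_dp_link_hwss();
    else
            hwss = get_virtual_link_hwss();

    hwss->setup_stream_encoder(pipe_ctx);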
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h
new file mode 100644 (file)
index 0000000..57d447e
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_HPO_DP_H__
+#define __LINK_HWSS_HPO_DP_H__
+
+#include "link_hwss.h"
+
+bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res);
+const struct link_hwss *get_hpo_dp_link_hwss(void);
+
+#endif /* __LINK_HWSS_HPO_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
new file mode 100644 (file)
index 0000000..9df273c
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_hwss_hpo_frl.h"
+#include "core_types.h"
+#include "virtual/virtual_link_hwss.h"
+
+static const struct link_hwss hpo_frl_link_hwss = {
+       .setup_stream_encoder = virtual_setup_stream_encoder,
+       .reset_stream_encoder = virtual_reset_stream_encoder,
+};
+
+bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res)
+{
+       return link_res->hpo_frl_link_enc != NULL;
+}
+
+const struct link_hwss *get_hpo_frl_link_hwss(void)
+{
+       return &hpo_frl_link_hwss;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h
new file mode 100644 (file)
index 0000000..ea8d976
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_HWSS_HPO_FRL_H__
+#define __LINK_HWSS_HPO_FRL_H__
+
+#include "link_hwss.h"
+
+bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
+               const struct link_resource *link_res);
+const struct link_hwss *get_hpo_frl_link_hwss(void);
+
+#endif /* __LINK_HWSS_HPO_FRL_H__ */
index 07326d244d50a7333b43e0d3e02bc0dedca35f97..931facd4dab5d9764e5e3b5a223223bbc478a8f8 100644 (file)
@@ -23,7 +23,7 @@
 # Makefile for the virtual sub-component of DAL.
 # It provides the control and status of the HW CRTC block.
 
-VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o
+VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o virtual_link_hwss.o
 
 AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL))
 
index 348e9a600a728961c14d8ee02d392e63c9f38182..df8bc44bc4beff89e1874dbe01daef6b8a0c979b 100644 (file)
@@ -63,7 +63,8 @@ static void virtual_link_encoder_disable_output(
 
 static void virtual_link_encoder_dp_set_lane_settings(
        struct link_encoder *enc,
-       const struct link_training_settings *link_settings) {}
+       const struct dc_link_settings *link_settings,
+       const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) {}
 
 static void virtual_link_encoder_dp_set_phy_pattern(
        struct link_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
new file mode 100644 (file)
index 0000000..525eba2
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "virtual_link_hwss.h"
+
+void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx)
+{
+}
+
+void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
+{
+}
+
+static const struct link_hwss virtual_link_hwss = {
+       .setup_stream_encoder = virtual_setup_stream_encoder,
+       .reset_stream_encoder = virtual_reset_stream_encoder,
+};
+
+const struct link_hwss *get_virtual_link_hwss(void)
+{
+       return &virtual_link_hwss;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
new file mode 100644 (file)
index 0000000..e6bcb4b
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_VIRTUAL_LINK_HWSS_H__
+#define __DC_VIRTUAL_LINK_HWSS_H__
+
+#include "core_types.h"
+
+void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx);
+void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx);
+const struct link_hwss *get_virtual_link_hwss(void);
+
+#endif /* __DC_VIRTUAL_LINK_HWSS_H__ */
index 873ecd04e01d0be69354983dd082533414509075..fa11776387063855f14cbf9bb4e411837e4d5528 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95
+#define DMUB_FW_VERSION_GIT_HASH 0x5189adbf
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 98
+#define DMUB_FW_VERSION_REVISION 103
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -367,8 +367,9 @@ union dmub_fw_boot_options {
                /**< 1 if all root clock gating is enabled and low power memory is enabled */
                uint32_t power_optimization: 1;
                uint32_t diag_env: 1; /* 1 if diagnostic environment */
+               uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8 */
 
-               uint32_t reserved : 19; /**< reserved */
+               uint32_t reserved : 18; /**< reserved */
        } bits; /**< boot bits */
        uint32_t all; /**< 32-bit access to bits */
 };
@@ -523,7 +524,7 @@ union dmub_inbox0_cmd_lock_hw {
                uint32_t command_code: 8;
 
                /* NOTE: Must have enough bits to match: enum hw_lock_client */
-               uint32_t hw_lock_client: 1;
+               uint32_t hw_lock_client: 2;
 
                /* NOTE: Below fields must match with: struct dmub_hw_lock_inst_flags */
                uint32_t otg_inst: 3;
@@ -538,7 +539,7 @@ union dmub_inbox0_cmd_lock_hw {
 
                uint32_t lock: 1;                               /**< Lock */
                uint32_t should_release: 1;             /**< Release */
-               uint32_t reserved: 8;                   /**< Reserved for extending more clients, HW, etc. */
+               uint32_t reserved: 7;                   /**< Reserved for extending more clients, HW, etc. */
        } bits;
        uint32_t all;
 };
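In both bit-field hunks above, the widened or added field is paid for out of the adjacent reserved field (boot options: reserved 19 -> 18 for the new gpint_scratch8 bit; lock_hw: reserved 8 -> 7 for the second hw_lock_client bit), so each union still packs into exactly one 32-bit word. A compile-time guard along these lines would catch a miscount (a sketch; no such assert is part of this patch):

    _Static_assert(sizeof(union dmub_inbox0_cmd_lock_hw) == sizeof(uint32_t),
                   "inbox0 lock command must stay one dword");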
@@ -644,6 +645,7 @@ enum dmub_cmd_type {
         * Command type used for OUTBOX1 notification enable
         */
        DMUB_CMD__OUTBOX1_ENABLE = 71,
+
        /**
         * Command type used for all idle optimization commands.
         */
@@ -656,6 +658,7 @@ enum dmub_cmd_type {
         * Command type used for all panel control commands.
         */
        DMUB_CMD__PANEL_CNTL = 74,
+
        /**
         * Command type used for interfacing with DPIA.
         */
index fa0569174aecf532089052c6f536ce07c286af92..82c651535628734f273596a599bc5b0558634b33 100644 (file)
@@ -441,7 +441,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
 bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
 {
        uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0);
-       bool should_detect = fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED;
+       bool should_detect = (fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED) != 0;
        return should_detect;
 }
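The added "!= 0" is for explicitness (and static-analysis friendliness) rather than behavior: converting any scalar to C's bool already normalizes nonzero values to 1. A standalone check of that equivalence (the bit position is a made-up example, not the real DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED value):

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
            unsigned int fw_boot_status = 0x800;    /* assume the flag is bit 11 */
            bool a = fw_boot_status & 0x800;        /* implicit _Bool conversion */
            bool b = (fw_boot_status & 0x800) != 0; /* explicit form from the patch */

            assert(a == b);
            return 0;
    }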
 
index b8ffb216ebc4793dca9f92109c335484001a09c7..cf4027cc3f4cfc6d07cd5395993741bb92f95df5 100644 (file)
@@ -152,10 +152,8 @@ struct bp_transmitter_control {
        enum signal_type signal;
        enum dc_color_depth color_depth; /* not used for DCE6.0 */
        enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCE6.0 */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        enum tx_ffe_id txffe_sel; /* used for DCN3 */
        enum engine_id hpo_engine_id; /* used for DCN3 */
-#endif
        struct graphics_object_id connector_obj_id;
        /* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should
         * be pixel clock * deep_color_ratio (in KHz)
index e4a2dfacab4c43cd195d7dae9cd2d6a62f5e6762..e672be6327cb351bde94b738ef7d9be9d9d7c253 100644 (file)
@@ -212,6 +212,7 @@ enum {
 #define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
 #endif
 #define DEVICE_ID_NV_13FE 0x13FE  // CYAN_SKILLFISH
+#define DEVICE_ID_NV_143F 0x143F
 #define FAMILY_VGH 144
 #define DEVICE_ID_VGH_163F 0x163F
 #define VANGOGH_A0 0x01
index a2b80514d83ebb239e06225825955d105120984f..fb289a5c873a4a6d86457568ef8c97be4ccb68e1 100644 (file)
@@ -34,6 +34,8 @@
 #define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24
 #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
 #define DP_BRANCH_DEVICE_ID_006037 0x006037
+#define DP_BRANCH_HW_REV_10 0x10
+#define DP_BRANCH_HW_REV_20 0x20
 
 #define DP_DEVICE_ID_38EC11 0x38EC11
 enum ddc_result {
@@ -121,4 +123,7 @@ struct av_sync_data {
 static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
 static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
 
+/* MST Dock */
+static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
+
 #endif /* __DAL_DDC_SERVICE_TYPES_H__ */
index 270260e82b616029371131448bdeec8bda3809e2..ac822181359c4f6904eec040ea7e2f1860ba3d94 100644 (file)
@@ -144,14 +144,10 @@ enum dpcd_training_patterns {
        DPCD_TRAINING_PATTERN_1,
        DPCD_TRAINING_PATTERN_2,
        DPCD_TRAINING_PATTERN_3,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DPCD_TRAINING_PATTERN_4 = 7,
        DPCD_128b_132b_TPS1 = 1,
        DPCD_128b_132b_TPS2 = 2,
        DPCD_128b_132b_TPS2_CDS = 3,
-#else
-       DPCD_TRAINING_PATTERN_4 = 7
-#endif
 };
 
 /* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus
index 84b299ff500a22ca65522ae7de045f6c2710f839..05ba188a7c3b90745cf94479189d225ac75bd837 100644 (file)
@@ -140,7 +140,6 @@ enum sync_source {
        SYNC_SOURCE_DUAL_GPU_PIN
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 enum tx_ffe_id {
        TX_FFE0 = 0,
        TX_FFE1,
@@ -150,7 +149,6 @@ enum tx_ffe_id {
        TX_FFE_PreShoot_Only,
        TX_FFE_No_FFE,
 };
-#endif
 
 /* connector sizes in millimeters - from BiosParserTypes.hpp */
 #define CONNECTOR_SIZE_DVI                     40
index 01775417cf4b65d29890fcea1fbfa9cf8532ba2f..fed1edc038d8328e8c553c0332a0a848e827b9fd 100644 (file)
@@ -184,14 +184,12 @@ enum engine_id {
        ENGINE_ID_DACA,
        ENGINE_ID_DACB,
        ENGINE_ID_VCE,  /* wireless display pseudo-encoder */
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        ENGINE_ID_HPO_0,
        ENGINE_ID_HPO_1,
        ENGINE_ID_HPO_DP_0,
        ENGINE_ID_HPO_DP_1,
        ENGINE_ID_HPO_DP_2,
        ENGINE_ID_HPO_DP_3,
-#endif
        ENGINE_ID_VIRTUAL,
 
        ENGINE_ID_COUNT,
index 424bccd364340fa31f3bc255802fda95e43c6572..447a56286dd06cc0a256cadad4a9cdd85f99ffa3 100644 (file)
@@ -74,12 +74,10 @@ enum link_training_result {
        LINK_TRAINING_LINK_LOSS,
        /* Abort link training (because sink unplugged) */
        LINK_TRAINING_ABORT,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DP_128b_132b_LT_FAILED,
        DP_128b_132b_MAX_LOOP_COUNT_REACHED,
        DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
        DP_128b_132b_CDS_DONE_TIMEOUT,
-#endif
 };
 
 enum lttpr_mode {
@@ -99,23 +97,19 @@ struct link_training_settings {
        enum dc_pre_emphasis *pre_emphasis;
        enum dc_post_cursor2 *post_cursor2;
        bool should_set_fec_ready;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* TODO - factor lane_settings out because it changes during LT */
        union dc_dp_ffe_preset *ffe_preset;
-#endif
 
        uint16_t cr_pattern_time;
        uint16_t eq_pattern_time;
        uint16_t cds_pattern_time;
        enum dc_dp_training_pattern pattern_for_cr;
        enum dc_dp_training_pattern pattern_for_eq;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        enum dc_dp_training_pattern pattern_for_cds;
 
        uint32_t eq_wait_time_limit;
        uint8_t eq_loop_count_limit;
        uint32_t cds_wait_time_limit;
-#endif
 
        bool enhanced_framing;
        enum lttpr_mode lttpr_mode;
@@ -161,7 +155,6 @@ enum dp_test_pattern {
        DP_TEST_PATTERN_CP2520_2,
        DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2,
        DP_TEST_PATTERN_CP2520_3,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DP_TEST_PATTERN_128b_132b_TPS1,
        DP_TEST_PATTERN_128b_132b_TPS2,
        DP_TEST_PATTERN_PRBS9,
@@ -171,20 +164,15 @@ enum dp_test_pattern {
        DP_TEST_PATTERN_PRBS31,
        DP_TEST_PATTERN_264BIT_CUSTOM,
        DP_TEST_PATTERN_SQUARE_PULSE,
-#endif
 
        /* Link Training Patterns */
        DP_TEST_PATTERN_TRAINING_PATTERN1,
        DP_TEST_PATTERN_TRAINING_PATTERN2,
        DP_TEST_PATTERN_TRAINING_PATTERN3,
        DP_TEST_PATTERN_TRAINING_PATTERN4,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE,
        DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE,
        DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE,
-#else
-       DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4,
-#endif
 
        /* link test patterns*/
        DP_TEST_PATTERN_COLOR_SQUARES,
index 1ab813b4fd14f8c4bb1f1d59177b15b1a843d6b4..1d8b746b02f248334e966bddf7695304cbcc22ba 100644 (file)
@@ -34,7 +34,8 @@ struct dc_info_packet;
 struct mod_vrr_params;
 
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
-               struct dc_info_packet *info_packet);
+               struct dc_info_packet *info_packet,
+               enum dc_color_space cs);
 
 void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
                struct dc_info_packet *info_packet);
index 57f198de5e2cb3cdef09656ecc525ad73a34666d..b691aa45e84fbcec96c418d9df3fa7ec327eada5 100644 (file)
@@ -130,7 +130,8 @@ enum ColorimetryYCCDP {
 };
 
 void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
-               struct dc_info_packet *info_packet)
+               struct dc_info_packet *info_packet,
+               enum dc_color_space cs)
 {
        unsigned int vsc_packet_revision = vsc_packet_undefined;
        unsigned int i;
@@ -331,13 +332,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
                /* Set Colorimetry format based on pixel encoding */
                switch (stream->timing.pixel_encoding) {
                case PIXEL_ENCODING_RGB:
-                       if ((stream->output_color_space == COLOR_SPACE_SRGB) ||
-                                       (stream->output_color_space == COLOR_SPACE_SRGB_LIMITED))
+                       if ((cs == COLOR_SPACE_SRGB) ||
+                                       (cs == COLOR_SPACE_SRGB_LIMITED))
                                colorimetryFormat = ColorimetryRGB_DP_sRGB;
-                       else if (stream->output_color_space == COLOR_SPACE_ADOBERGB)
+                       else if (cs == COLOR_SPACE_ADOBERGB)
                                colorimetryFormat = ColorimetryRGB_DP_AdobeRGB;
-                       else if ((stream->output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) ||
-                                       (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
+                       else if ((cs == COLOR_SPACE_2020_RGB_FULLRANGE) ||
+                                       (cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE))
                                colorimetryFormat = ColorimetryRGB_DP_ITU_R_BT2020RGB;
                        break;
 
@@ -347,13 +348,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
                        /* Note: xvYCC is probably not supported correctly here on DP, since the
                         * colorspace translation loses the distinction between BT601 and xvYCC601
                         */
-                       if (stream->output_color_space == COLOR_SPACE_YCBCR601)
+                       if (cs == COLOR_SPACE_YCBCR601)
                                colorimetryFormat = ColorimetryYCC_DP_ITU601;
-                       else if (stream->output_color_space == COLOR_SPACE_YCBCR709)
+                       else if (cs == COLOR_SPACE_YCBCR709)
                                colorimetryFormat = ColorimetryYCC_DP_ITU709;
-                       else if (stream->output_color_space == COLOR_SPACE_ADOBERGB)
+                       else if (cs == COLOR_SPACE_ADOBERGB)
                                colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
-                       else if (stream->output_color_space == COLOR_SPACE_2020_YCBCR)
+                       else if (cs == COLOR_SPACE_2020_YCBCR)
                                colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
                        break;
 
@@ -391,8 +392,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
                }
 
                /* all YCbCr are always limited range */
-               if ((stream->output_color_space == COLOR_SPACE_SRGB_LIMITED) ||
-                               (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) ||
+               if ((cs == COLOR_SPACE_SRGB_LIMITED) ||
+                               (cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE) ||
                                (pixelEncoding != 0x0)) {
                        info_packet->sb[17] |= 0x80; /* DB17 bit 7 set to 1 for CEA timing. */
                }
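With the new parameter, the packet builder no longer reads the color space off the stream itself; the caller supplies it. A hypothetical call site that preserves the old behavior would simply forward the stream's own value:

    mod_build_vsc_infopacket(stream, &info_packet, stream->output_color_space);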
index f57a1478f0fed4f9744a1fb47267898e448ab3b8..fe4e585781bb5039f4dd47d700d0051de50ff42b 100644 (file)
@@ -83,6 +83,7 @@ enum amd_apu_flags {
 * @AMD_IP_BLOCK_TYPE_VCN: Video Core/Codec Next
 * @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler
 * @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
+* @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types
 */
 enum amd_ip_block_type {
        AMD_IP_BLOCK_TYPE_COMMON,
@@ -268,7 +269,6 @@ enum amd_dpm_forced_level;
  * @set_clockgating_state: enable/disable cg for the IP block
  * @set_powergating_state: enable/disable pg for the IP block
  * @get_clockgating_state: get current clockgating status
- * @enable_umd_pstate: enable UMD powerstate
  *
  * These hooks provide an interface for controlling the operational state
  * of IP blocks. After acquiring a list of IP blocks for the GPU in use,
@@ -299,7 +299,6 @@ struct amd_ip_funcs {
        int (*set_powergating_state)(void *handle,
                                     enum amd_powergating_state state);
        void (*get_clockgating_state)(void *handle, u32 *flags);
-       int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
 };
 
 
similarity index 99%
rename from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_offset.h
rename to drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h
index 67faaf68e9d7cff28a5ba5482b8b87f4dbccd6cd..0bb47e06eee8233da4af6c80162314bf2bf74733 100644 (file)
@@ -1,3 +1,10 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Authors: AMD
+ */
+
 #ifndef _dpcs_3_0_0_OFFSET_HEADER
 #define _dpcs_3_0_0_OFFSET_HEADER
 
similarity index 99%
rename from drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_3_0_0_sh_mask.h
rename to drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h
index b4ef50a72868d3c35e030c16c3b19b2317292f9f..23fa1121a9673e652a6a2d1cd0f91b4499d5f69c 100644 (file)
@@ -1,3 +1,10 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Authors: AMD
+ */
+
 #ifndef _dpcs_3_0_0_SH_MASK_HEADER
 #define _dpcs_3_0_0_SH_MASK_HEADER
 
index ac941f62cbed13dae4686b8779aa2da931b06b9f..2f60cf35a4444f8af1d5d3d661b781475d31ef5b 100644 (file)
@@ -269,18 +269,9 @@ struct kfd2kgd_calls {
        int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd,
                                unsigned int timeout);
 
-       int (*address_watch_disable)(struct amdgpu_device *adev);
-       int (*address_watch_execute)(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       uint32_t cntl_val,
-                                       uint32_t addr_hi,
-                                       uint32_t addr_lo);
        int (*wave_control_execute)(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd);
-       uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev,
-                                       unsigned int watch_point_id,
-                                       unsigned int reg_offset);
        bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev,
                                        uint8_t vmid,
                                        uint16_t *p_pasid);
index 5c0867ebcfce177b8984a652a809bd078fcf541f..a4c267f159591898b95feff32df06e6bcf8efd54 100644 (file)
 #define __KGD_PP_INTERFACE_H__
 
 extern const struct amdgpu_ip_block_version pp_smu_ip_block;
+extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
+extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
+
+enum smu_event_type {
+       SMU_EVENT_RESET_COMPLETE = 0,
+};
 
 struct amd_vce_state {
        /* vce clocks */
@@ -308,12 +315,13 @@ struct amd_pm_funcs {
                                void  *rps,
                                bool  *equal);
 /* export for sysfs */
-       void (*set_fan_control_mode)(void *handle, u32 mode);
-       u32 (*get_fan_control_mode)(void *handle);
+       int (*set_fan_control_mode)(void *handle, u32 mode);
+       int (*get_fan_control_mode)(void *handle, u32 *fan_mode);
        int (*set_fan_speed_pwm)(void *handle, u32 speed);
        int (*get_fan_speed_pwm)(void *handle, u32 *speed);
        int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+       int (*emit_clock_levels)(void *handle, enum pp_clock_type type, char *buf, int *offset);
        int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
        int (*get_sclk_od)(void *handle);
        int (*set_sclk_od)(void *handle, uint32_t value);
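Both fan-mode hooks now report errors through an int return, and the getter hands the mode back via an out-parameter instead of the return value. A hedged sketch of a caller under the new contract (pp_funcs and handle are placeholders for whatever the real call site holds):

    u32 mode;
    int err;

    err = pp_funcs->get_fan_control_mode(handle, &mode);
    if (err)
            return err;

    err = pp_funcs->set_fan_control_mode(handle, mode);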
@@ -400,6 +408,7 @@ struct amd_pm_funcs {
        int (*get_dpm_clock_table)(void *handle,
                                   struct dpm_clocks *clock_table);
        int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
+       void (*pm_compute_clocks)(void *handle);
 };
 
 struct metrics_table_header {
index 8cf6eff1ea93ddf3c84e365196ad00e56ee58b52..51751db436b074d39be1b7fe4c515ca2c5a827db 100644 (file)
 #
 
 subdir-ccflags-y += \
-               -I$(FULL_AMD_PATH)/pm/inc/  \
                -I$(FULL_AMD_PATH)/include/asic_reg  \
                -I$(FULL_AMD_PATH)/include  \
+               -I$(FULL_AMD_PATH)/pm/inc/  \
                -I$(FULL_AMD_PATH)/pm/swsmu \
+               -I$(FULL_AMD_PATH)/pm/swsmu/inc \
+               -I$(FULL_AMD_PATH)/pm/swsmu/inc/pmfw_if \
                -I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
                -I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
                -I$(FULL_AMD_PATH)/pm/swsmu/smu13 \
-               -I$(FULL_AMD_PATH)/pm/powerplay \
+               -I$(FULL_AMD_PATH)/pm/powerplay/inc \
                -I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
-               -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr
+               -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr \
+               -I$(FULL_AMD_PATH)/pm/legacy-dpm
 
 AMD_PM_PATH = ../pm
 
-PM_LIBS = swsmu powerplay
+PM_LIBS = swsmu powerplay legacy-dpm
 
 AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS)))
 
 include $(AMD_PM)
 
-PM_MGR = amdgpu_dpm.o amdgpu_pm.o
+PM_MGR = amdgpu_dpm.o amdgpu_pm.o amdgpu_dpm_internal.o
 
 AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR))
 
index 08362d506534ba14964e8f8152178b816443506c..1d63f1e8884c3eab665f7d373c1ac319591d13b1 100644 (file)
 #include "amdgpu_display.h"
 #include "hwmgr.h"
 #include <linux/power_supply.h>
+#include "amdgpu_smu.h"
 
-#define WIDTH_4K 3840
+#define amdgpu_dpm_enable_bapm(adev, e) \
+               ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
 
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
-{
-       const char *s;
-
-       switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
-       case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
-       default:
-               s = "none";
-               break;
-       case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
-               s = "battery";
-               break;
-       case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
-               s = "balanced";
-               break;
-       case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
-               s = "performance";
-               break;
-       }
-       printk("\tui class: %s\n", s);
-       printk("\tinternal class:");
-       if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
-           (class2 == 0))
-               pr_cont(" none");
-       else {
-               if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
-                       pr_cont(" boot");
-               if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
-                       pr_cont(" thermal");
-               if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
-                       pr_cont(" limited_pwr");
-               if (class & ATOM_PPLIB_CLASSIFICATION_REST)
-                       pr_cont(" rest");
-               if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
-                       pr_cont(" forced");
-               if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
-                       pr_cont(" 3d_perf");
-               if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
-                       pr_cont(" ovrdrv");
-               if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
-                       pr_cont(" uvd");
-               if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
-                       pr_cont(" 3d_low");
-               if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
-                       pr_cont(" acpi");
-               if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
-                       pr_cont(" uvd_hd2");
-               if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
-                       pr_cont(" uvd_hd");
-               if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
-                       pr_cont(" uvd_sd");
-               if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
-                       pr_cont(" limited_pwr2");
-               if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
-                       pr_cont(" ulv");
-               if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
-                       pr_cont(" uvd_mvc");
-       }
-       pr_cont("\n");
-}
-
-void amdgpu_dpm_print_cap_info(u32 caps)
-{
-       printk("\tcaps:");
-       if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
-               pr_cont(" single_disp");
-       if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
-               pr_cont(" video");
-       if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
-               pr_cont(" no_dc");
-       pr_cont("\n");
-}
-
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
-                               struct amdgpu_ps *rps)
-{
-       printk("\tstatus:");
-       if (rps == adev->pm.dpm.current_ps)
-               pr_cont(" c");
-       if (rps == adev->pm.dpm.requested_ps)
-               pr_cont(" r");
-       if (rps == adev->pm.dpm.boot_ps)
-               pr_cont(" b");
-       pr_cont("\n");
-}
-
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
-{
-       struct drm_device *ddev = adev_to_drm(adev);
-       struct drm_crtc *crtc;
-       struct amdgpu_crtc *amdgpu_crtc;
-
-       adev->pm.dpm.new_active_crtcs = 0;
-       adev->pm.dpm.new_active_crtc_count = 0;
-       if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-               list_for_each_entry(crtc,
-                                   &ddev->mode_config.crtc_list, head) {
-                       amdgpu_crtc = to_amdgpu_crtc(crtc);
-                       if (amdgpu_crtc->enabled) {
-                               adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
-                               adev->pm.dpm.new_active_crtc_count++;
-                       }
-               }
-       }
-}
-
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
-       struct drm_device *dev = adev_to_drm(adev);
-       struct drm_crtc *crtc;
-       struct amdgpu_crtc *amdgpu_crtc;
-       u32 vblank_in_pixels;
-       u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
-
-       if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       amdgpu_crtc = to_amdgpu_crtc(crtc);
-                       if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-                               vblank_in_pixels =
-                                       amdgpu_crtc->hw_mode.crtc_htotal *
-                                       (amdgpu_crtc->hw_mode.crtc_vblank_end -
-                                       amdgpu_crtc->hw_mode.crtc_vdisplay +
-                                       (amdgpu_crtc->v_border * 2));
-
-                               vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
-                               break;
-                       }
-               }
-       }
-
-       return vblank_time_us;
-}
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
-       struct drm_device *dev = adev_to_drm(adev);
-       struct drm_crtc *crtc;
-       struct amdgpu_crtc *amdgpu_crtc;
-       u32 vrefresh = 0;
-
-       if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       amdgpu_crtc = to_amdgpu_crtc(crtc);
-                       if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
-                               vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
-                               break;
-                       }
-               }
-       }
-
-       return vrefresh;
-}
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
-{
-       switch (sensor) {
-       case THERMAL_TYPE_RV6XX:
-       case THERMAL_TYPE_RV770:
-       case THERMAL_TYPE_EVERGREEN:
-       case THERMAL_TYPE_SUMO:
-       case THERMAL_TYPE_NI:
-       case THERMAL_TYPE_SI:
-       case THERMAL_TYPE_CI:
-       case THERMAL_TYPE_KV:
-               return true;
-       case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
-       case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
-               return false; /* need special handling */
-       case THERMAL_TYPE_NONE:
-       case THERMAL_TYPE_EXTERNAL:
-       case THERMAL_TYPE_EXTERNAL_GPIO:
-       default:
-               return false;
-       }
-}
-
-union power_info {
-       struct _ATOM_POWERPLAY_INFO info;
-       struct _ATOM_POWERPLAY_INFO_V2 info_2;
-       struct _ATOM_POWERPLAY_INFO_V3 info_3;
-       struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
-       struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
-       struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
-       struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
-       struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
-       struct _ATOM_PPLIB_FANTABLE fan;
-       struct _ATOM_PPLIB_FANTABLE2 fan2;
-       struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
-                                             ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
-{
-       u32 size = atom_table->ucNumEntries *
-               sizeof(struct amdgpu_clock_voltage_dependency_entry);
-       int i;
-       ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
-
-       amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
-       if (!amdgpu_table->entries)
-               return -ENOMEM;
-
-       entry = &atom_table->entries[0];
-       for (i = 0; i < atom_table->ucNumEntries; i++) {
-               amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
-                       (entry->ucClockHigh << 16);
-               amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
-               entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
-                       ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
-       }
-       amdgpu_table->count = atom_table->ucNumEntries;
-
-       return 0;
-}
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev)
-{
-       struct amdgpu_mode_info *mode_info = &adev->mode_info;
-       union power_info *power_info;
-       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-       u16 data_offset;
-       u8 frev, crev;
-
-       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
-                                  &frev, &crev, &data_offset))
-               return -EINVAL;
-       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
-       adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
-       adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
-       adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
-
-       return 0;
-}
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
-{
-       struct amdgpu_mode_info *mode_info = &adev->mode_info;
-       union power_info *power_info;
-       union fan_info *fan_info;
-       ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
-       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-       u16 data_offset;
-       u8 frev, crev;
-       int ret, i;
-
-       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
-                                  &frev, &crev, &data_offset))
-               return -EINVAL;
-       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
-       /* fan table */
-       if (le16_to_cpu(power_info->pplib.usTableSize) >=
-           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
-               if (power_info->pplib3.usFanTableOffset) {
-                       fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
-                                                     le16_to_cpu(power_info->pplib3.usFanTableOffset));
-                       adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
-                       adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
-                       adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
-                       adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
-                       adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
-                       adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
-                       adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
-                       if (fan_info->fan.ucFanTableFormat >= 2)
-                               adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
-                       else
-                               adev->pm.dpm.fan.t_max = 10900;
-                       adev->pm.dpm.fan.cycle_delay = 100000;
-                       if (fan_info->fan.ucFanTableFormat >= 3) {
-                               adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
-                               adev->pm.dpm.fan.default_max_fan_pwm =
-                                       le16_to_cpu(fan_info->fan3.usFanPWMMax);
-                               adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
-                               adev->pm.dpm.fan.fan_output_sensitivity =
-                                       le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
-                       }
-                       adev->pm.dpm.fan.ucode_fan_control = true;
-               }
-       }
-
-       /* clock dependancy tables, shedding tables */
-       if (le16_to_cpu(power_info->pplib.usTableSize) >=
-           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
-               if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
-                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
-                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
-                                                                dep_table);
-                       if (ret) {
-                               amdgpu_free_extended_power_table(adev);
-                               return ret;
-                       }
-               }
-               if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
-                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
-                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
-                                                                dep_table);
-                       if (ret) {
-                               amdgpu_free_extended_power_table(adev);
-                               return ret;
-                       }
-               }
-               if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
-                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
-                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
-                                                                dep_table);
-                       if (ret) {
-                               amdgpu_free_extended_power_table(adev);
-                               return ret;
-                       }
-               }
-               if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
-                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
-                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
-                                                                dep_table);
-                       if (ret) {
-                               amdgpu_free_extended_power_table(adev);
-                               return ret;
-                       }
-               }
-               if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
-                       ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
-                               (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
-                       if (clk_v->ucNumEntries) {
-                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
-                                       le16_to_cpu(clk_v->entries[0].usSclkLow) |
-                                       (clk_v->entries[0].ucSclkHigh << 16);
-                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
-                                       le16_to_cpu(clk_v->entries[0].usMclkLow) |
-                                       (clk_v->entries[0].ucMclkHigh << 16);
-                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
-                                       le16_to_cpu(clk_v->entries[0].usVddc);
-                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
-                                       le16_to_cpu(clk_v->entries[0].usVddci);
-                       }
-               }
-               if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
-                       ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
-                               (ATOM_PPLIB_PhaseSheddingLimits_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
-                       ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
-
-                       adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
-                               kcalloc(psl->ucNumEntries,
-                                       sizeof(struct amdgpu_phase_shedding_limits_entry),
-                                       GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-
-                       entry = &psl->entries[0];
-                       for (i = 0; i < psl->ucNumEntries; i++) {
-                               adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
-                                       le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
-                               adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
-                                       le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
-                               adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
-                                       le16_to_cpu(entry->usVoltage);
-                               entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
-                                       ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
-                       }
-                       adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
-                               psl->ucNumEntries;
-               }
-       }
-
-       /* cac data */
-       if (le16_to_cpu(power_info->pplib.usTableSize) >=
-           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
-               adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
-               adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
-               adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
-               adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
-               if (adev->pm.dpm.tdp_od_limit)
-                       adev->pm.dpm.power_control = true;
-               else
-                       adev->pm.dpm.power_control = false;
-               adev->pm.dpm.tdp_adjustment = 0;
-               adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
-               adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
-               adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
-               if (power_info->pplib5.usCACLeakageTableOffset) {
-                       ATOM_PPLIB_CAC_Leakage_Table *cac_table =
-                               (ATOM_PPLIB_CAC_Leakage_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
-                       ATOM_PPLIB_CAC_Leakage_Record *entry;
-                       u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
-                       adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       entry = &cac_table->entries[0];
-                       for (i = 0; i < cac_table->ucNumEntries; i++) {
-                               if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
-                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
-                                               le16_to_cpu(entry->usVddc1);
-                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
-                                               le16_to_cpu(entry->usVddc2);
-                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
-                                               le16_to_cpu(entry->usVddc3);
-                               } else {
-                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
-                                               le16_to_cpu(entry->usVddc);
-                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
-                                               le32_to_cpu(entry->ulLeakageValue);
-                               }
-                               entry = (ATOM_PPLIB_CAC_Leakage_Record *)
-                                       ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
-                       }
-                       adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
-               }
-       }
-
-       /* ext tables */
-       if (le16_to_cpu(power_info->pplib.usTableSize) >=
-           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
-               ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
-                       (mode_info->atom_context->bios + data_offset +
-                        le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
-                       ext_hdr->usVCETableOffset) {
-                       VCEClockInfoArray *array = (VCEClockInfoArray *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
-                       ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
-                               (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
-                                1 + array->ucNumEntries * sizeof(VCEClockInfo));
-                       ATOM_PPLIB_VCE_State_Table *states =
-                               (ATOM_PPLIB_VCE_State_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
-                                1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
-                                1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
-                       ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
-                       ATOM_PPLIB_VCE_State_Record *state_entry;
-                       VCEClockInfo *vce_clk;
-                       u32 size = limits->numEntries *
-                               sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
-                       adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
-                               kzalloc(size, GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
-                               limits->numEntries;
-                       entry = &limits->entries[0];
-                       state_entry = &states->entries[0];
-                       for (i = 0; i < limits->numEntries; i++) {
-                               vce_clk = (VCEClockInfo *)
-                                       ((u8 *)&array->entries[0] +
-                                        (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
-                               adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
-                                       le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
-                               adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
-                                       le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
-                               adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
-                                       le16_to_cpu(entry->usVoltage);
-                               entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
-                                       ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
-                       }
-                       adev->pm.dpm.num_of_vce_states =
-                                       states->numEntries > AMD_MAX_VCE_LEVELS ?
-                                       AMD_MAX_VCE_LEVELS : states->numEntries;
-                       for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
-                               vce_clk = (VCEClockInfo *)
-                                       ((u8 *)&array->entries[0] +
-                                        (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
-                               adev->pm.dpm.vce_states[i].evclk =
-                                       le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
-                               adev->pm.dpm.vce_states[i].ecclk =
-                                       le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
-                               adev->pm.dpm.vce_states[i].clk_idx =
-                                       state_entry->ucClockInfoIndex & 0x3f;
-                               adev->pm.dpm.vce_states[i].pstate =
-                                       (state_entry->ucClockInfoIndex & 0xc0) >> 6;
-                               state_entry = (ATOM_PPLIB_VCE_State_Record *)
-                                       ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
-                       }
-               }
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
-                       ext_hdr->usUVDTableOffset) {
-                       UVDClockInfoArray *array = (UVDClockInfoArray *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
-                       ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
-                               (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
-                                1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
-                       ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
-                       u32 size = limits->numEntries *
-                               sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
-                       adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
-                               kzalloc(size, GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
-                               limits->numEntries;
-                       entry = &limits->entries[0];
-                       for (i = 0; i < limits->numEntries; i++) {
-                               UVDClockInfo *uvd_clk = (UVDClockInfo *)
-                                       ((u8 *)&array->entries[0] +
-                                        (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
-                               adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
-                                       le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
-                               adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
-                                       le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
-                               adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
-                                       le16_to_cpu(entry->usVoltage);
-                               entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
-                                       ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
-                       }
-               }
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
-                       ext_hdr->usSAMUTableOffset) {
-                       ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
-                               (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
-                       ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
-                       u32 size = limits->numEntries *
-                               sizeof(struct amdgpu_clock_voltage_dependency_entry);
-                       adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
-                               kzalloc(size, GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
-                               limits->numEntries;
-                       entry = &limits->entries[0];
-                       for (i = 0; i < limits->numEntries; i++) {
-                               adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
-                                       le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
-                               adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
-                                       le16_to_cpu(entry->usVoltage);
-                               entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
-                                       ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
-                       }
-               }
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
-                   ext_hdr->usPPMTableOffset) {
-                       ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usPPMTableOffset));
-                       adev->pm.dpm.dyn_state.ppm_table =
-                               kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.ppm_table) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
-                       adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
-                               le16_to_cpu(ppm->usCpuCoreNumber);
-                       adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
-                               le32_to_cpu(ppm->ulPlatformTDP);
-                       adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
-                               le32_to_cpu(ppm->ulSmallACPlatformTDP);
-                       adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
-                               le32_to_cpu(ppm->ulPlatformTDC);
-                       adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
-                               le32_to_cpu(ppm->ulSmallACPlatformTDC);
-                       adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
-                               le32_to_cpu(ppm->ulApuTDP);
-                       adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
-                               le32_to_cpu(ppm->ulDGpuTDP);
-                       adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
-                               le32_to_cpu(ppm->ulDGpuUlvPower);
-                       adev->pm.dpm.dyn_state.ppm_table->tj_max =
-                               le32_to_cpu(ppm->ulTjmax);
-               }
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
-                       ext_hdr->usACPTableOffset) {
-                       ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
-                               (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
-                       ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
-                       u32 size = limits->numEntries *
-                               sizeof(struct amdgpu_clock_voltage_dependency_entry);
-                       adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
-                               kzalloc(size, GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
-                               limits->numEntries;
-                       entry = &limits->entries[0];
-                       for (i = 0; i < limits->numEntries; i++) {
-                               adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
-                                       le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
-                               adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
-                                       le16_to_cpu(entry->usVoltage);
-                               entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
-                                       ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
-                       }
-               }
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
-                       ext_hdr->usPowerTuneTableOffset) {
-                       u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
-                                        le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
-                       ATOM_PowerTune_Table *pt;
-                       adev->pm.dpm.dyn_state.cac_tdp_table =
-                               kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
-                       if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
-                               amdgpu_free_extended_power_table(adev);
-                               return -ENOMEM;
-                       }
-                       if (rev > 0) {
-                               ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
-                                       (mode_info->atom_context->bios + data_offset +
-                                        le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
-                               adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
-                                       ppt->usMaximumPowerDeliveryLimit;
-                               pt = &ppt->power_tune_table;
-                       } else {
-                               ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
-                                       (mode_info->atom_context->bios + data_offset +
-                                        le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
-                               adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
-                               pt = &ppt->power_tune_table;
-                       }
-                       adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
-                       adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
-                               le16_to_cpu(pt->usConfigurableTDP);
-                       adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
-                       adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
-                               le16_to_cpu(pt->usBatteryPowerLimit);
-                       adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
-                               le16_to_cpu(pt->usSmallPowerLimit);
-                       adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
-                               le16_to_cpu(pt->usLowCACLeakage);
-                       adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
-                               le16_to_cpu(pt->usHighCACLeakage);
-               }
-               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
-                               ext_hdr->usSclkVddgfxTableOffset) {
-                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
-                               (mode_info->atom_context->bios + data_offset +
-                                le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
-                       ret = amdgpu_parse_clk_voltage_dep_table(
-                                       &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
-                                       dep_table);
-                       if (ret) {
-                               kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
-                               return ret;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
-{
-       struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
-
-       kfree(dyn_state->vddc_dependency_on_sclk.entries);
-       kfree(dyn_state->vddci_dependency_on_mclk.entries);
-       kfree(dyn_state->vddc_dependency_on_mclk.entries);
-       kfree(dyn_state->mvdd_dependency_on_mclk.entries);
-       kfree(dyn_state->cac_leakage_table.entries);
-       kfree(dyn_state->phase_shedding_limits_table.entries);
-       kfree(dyn_state->ppm_table);
-       kfree(dyn_state->cac_tdp_table);
-       kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
-       kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
-       kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
-       kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
-       kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
-}
-
-static const char *pp_lib_thermal_controller_names[] = {
-       "NONE",
-       "lm63",
-       "adm1032",
-       "adm1030",
-       "max6649",
-       "lm64",
-       "f75375",
-       "RV6xx",
-       "RV770",
-       "adt7473",
-       "NONE",
-       "External GPIO",
-       "Evergreen",
-       "emc2103",
-       "Sumo",
-       "Northern Islands",
-       "Southern Islands",
-       "lm96163",
-       "Sea Islands",
-       "Kaveri/Kabini",
-};
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
-{
-       struct amdgpu_mode_info *mode_info = &adev->mode_info;
-       ATOM_PPLIB_POWERPLAYTABLE *power_table;
-       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
-       ATOM_PPLIB_THERMALCONTROLLER *controller;
-       struct amdgpu_i2c_bus_rec i2c_bus;
-       u16 data_offset;
-       u8 frev, crev;
-
-       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
-                                  &frev, &crev, &data_offset))
-               return;
-       power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
-               (mode_info->atom_context->bios + data_offset);
-       controller = &power_table->sThermalController;
-
-       /* add the i2c bus for thermal/fan chip */
-       if (controller->ucType > 0) {
-               if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
-                       adev->pm.no_fan = true;
-               adev->pm.fan_pulses_per_revolution =
-                       controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
-               if (adev->pm.fan_pulses_per_revolution) {
-                       adev->pm.fan_min_rpm = controller->ucFanMinRPM;
-                       adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
-               }
-               if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_NI;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_SI;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_CI;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
-                       DRM_INFO("Internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_KV;
-               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
-                       DRM_INFO("External GPIO thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
-               } else if (controller->ucType ==
-                          ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
-                       DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
-               } else if (controller->ucType ==
-                          ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
-                       DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
-               } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
-                       DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
-                                pp_lib_thermal_controller_names[controller->ucType],
-                                controller->ucI2cAddress >> 1,
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-                       adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
-                       i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
-                       adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
-                       if (adev->pm.i2c_bus) {
-                               struct i2c_board_info info = { };
-                               const char *name = pp_lib_thermal_controller_names[controller->ucType];
-                               info.addr = controller->ucI2cAddress >> 1;
-                               strlcpy(info.type, name, sizeof(info.type));
-                               i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
-                       }
-               } else {
-                       DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
-                                controller->ucType,
-                                controller->ucI2cAddress >> 1,
-                                (controller->ucFanParameters &
-                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-               }
-       }
-}
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
-                                                u32 sys_mask,
-                                                enum amdgpu_pcie_gen asic_gen,
-                                                enum amdgpu_pcie_gen default_gen)
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 {
-       switch (asic_gen) {
-       case AMDGPU_PCIE_GEN1:
-               return AMDGPU_PCIE_GEN1;
-       case AMDGPU_PCIE_GEN2:
-               return AMDGPU_PCIE_GEN2;
-       case AMDGPU_PCIE_GEN3:
-               return AMDGPU_PCIE_GEN3;
-       default:
-               if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
-                   (default_gen == AMDGPU_PCIE_GEN3))
-                       return AMDGPU_PCIE_GEN3;
-               else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
-                        (default_gen == AMDGPU_PCIE_GEN2))
-                       return AMDGPU_PCIE_GEN2;
-               else
-                       return AMDGPU_PCIE_GEN1;
-       }
-       return AMDGPU_PCIE_GEN1;
-}
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
 
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       if (!pp_funcs->get_sclk)
+               return 0;
 
-       if (idx < adev->pm.dpm.num_of_vce_states)
-               return &adev->pm.dpm.vce_states[idx];
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_sclk(adev->powerplay.pp_handle, low);
+       mutex_unlock(&adev->pm.mutex);
 
-       return NULL;
+       return ret;
 }
 
-int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
 
-       return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
-}
+       if (!pp_funcs->get_mclk)
+               return 0;
 
-int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_mclk(adev->powerplay.pp_handle, low);
+       mutex_unlock(&adev->pm.mutex);
 
-       return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+       return ret;
 }
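
The two wrappers above set the pattern this patch applies throughout the file: check that the callback exists, serialize on adev->pm.mutex, dispatch through the pp_handle, and return the callback's result. A minimal sketch of that shape, with do_something as a purely hypothetical pp_funcs callback used only for illustration:

/* Hypothetical wrapper sketch; do_something is not a real pp_funcs member. */
int amdgpu_dpm_do_something(struct amdgpu_device *adev, u32 arg)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	int ret;

	if (!pp_funcs || !pp_funcs->do_something)
		return -EOPNOTSUPP; /* get_sclk/get_mclk return 0 here instead */

	mutex_lock(&adev->pm.mutex);
	ret = pp_funcs->do_something(adev->powerplay.pp_handle, arg);
	mutex_unlock(&adev->pm.mutex);

	return ret;
}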
 
 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
@@ -935,52 +80,20 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
                return 0;
        }
 
+       mutex_lock(&adev->pm.mutex);
+
        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCE:
-               if (pp_funcs && pp_funcs->set_powergating_by_smu) {
-                       /*
-                        * TODO: need a better lock mechanism
-                        *
-                        * Here adev->pm.mutex lock protection is enforced on
-                        * UVD and VCE cases only. Since for other cases, there
-                        * may be already lock protection in amdgpu_pm.c.
-                        * This is a quick fix for the deadlock issue below.
-                        *     INFO: task ocltst:2028 blocked for more than 120 seconds.
-                        *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
-                        *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
-                        *     ocltst         D    0  2028   2026 0x00000000
-                        *     Call Trace:
-                        *     __schedule+0x2c0/0x870
-                        *     schedule+0x2c/0x70
-                        *     schedule_preempt_disabled+0xe/0x10
-                        *     __mutex_lock.isra.9+0x26d/0x4e0
-                        *     __mutex_lock_slowpath+0x13/0x20
-                        *     ? __mutex_lock_slowpath+0x13/0x20
-                        *     mutex_lock+0x2f/0x40
-                        *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
-                        *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
-                        *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
-                        *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
-                        *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
-                        *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
-                        */
-                       mutex_lock(&adev->pm.mutex);
-                       ret = (pp_funcs->set_powergating_by_smu(
-                               (adev)->powerplay.pp_handle, block_type, gate));
-                       mutex_unlock(&adev->pm.mutex);
-               }
-               break;
        case AMD_IP_BLOCK_TYPE_GFX:
        case AMD_IP_BLOCK_TYPE_VCN:
        case AMD_IP_BLOCK_TYPE_SDMA:
        case AMD_IP_BLOCK_TYPE_JPEG:
        case AMD_IP_BLOCK_TYPE_GMC:
        case AMD_IP_BLOCK_TYPE_ACP:
-               if (pp_funcs && pp_funcs->set_powergating_by_smu) {
+               if (pp_funcs && pp_funcs->set_powergating_by_smu)
                        ret = (pp_funcs->set_powergating_by_smu(
                                (adev)->powerplay.pp_handle, block_type, gate));
-               }
                break;
        default:
                break;
@@ -989,6 +102,8 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
        if (!ret)
                atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
 
+       mutex_unlock(&adev->pm.mutex);
+
        return ret;
 }
 
@@ -1001,9 +116,13 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;
 
+       mutex_lock(&adev->pm.mutex);
+
        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
 
+       mutex_unlock(&adev->pm.mutex);
+
        return ret;
 }
 
@@ -1016,9 +135,13 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;
 
+       mutex_lock(&adev->pm.mutex);
+
        /* exit BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
 
+       mutex_unlock(&adev->pm.mutex);
+
        return ret;
 }
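
Note that baco_enter and baco_exit each take adev->pm.mutex independently, so calling them back to back is not atomic with respect to other DPM entry points; amdgpu_dpm_baco_reset() further down holds the mutex across both transitions for exactly that reason. A hedged usage sketch for the usual suspend/resume-style pairing (the example_* names are hypothetical):

/* Hypothetical callers: enter BACO on suspend, leave it on resume. */
static int example_runtime_suspend(struct amdgpu_device *adev)
{
	/* returns -ENOENT if set_asic_baco_state is not wired up */
	return amdgpu_dpm_baco_enter(adev);
}

static int example_runtime_resume(struct amdgpu_device *adev)
{
	return amdgpu_dpm_baco_exit(adev);
}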
 
@@ -1029,9 +152,13 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
        if (pp_funcs && pp_funcs->set_mp1_state) {
+               mutex_lock(&adev->pm.mutex);
+
                ret = pp_funcs->set_mp1_state(
                                adev->powerplay.pp_handle,
                                mp1_state);
+
+               mutex_unlock(&adev->pm.mutex);
        }
 
        return ret;
@@ -1042,25 +169,37 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        bool baco_cap;
+       int ret = 0;
 
        if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
                return false;
 
-       if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
-               return false;
+       mutex_lock(&adev->pm.mutex);
+
+       ret = pp_funcs->get_asic_baco_capability(pp_handle,
+                                                &baco_cap);
+
+       mutex_unlock(&adev->pm.mutex);
 
-       return baco_cap;
+       return ret ? false : baco_cap;
 }
 
 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
 {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
+       int ret = 0;
 
        if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
                return -ENOENT;
 
-       return pp_funcs->asic_reset_mode_2(pp_handle);
+       mutex_lock(&adev->pm.mutex);
+
+       ret = pp_funcs->asic_reset_mode_2(pp_handle);
+
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
 }
 
 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
@@ -1072,37 +211,47 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;
 
+       mutex_lock(&adev->pm.mutex);
+
        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
        if (ret)
-               return ret;
+               goto out;
 
        /* exit BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
-       if (ret)
-               return ret;
 
-       return 0;
+out:
+       mutex_unlock(&adev->pm.mutex);
+       return ret;
 }
 
 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       bool support_mode1_reset = false;
 
-       if (is_support_sw_smu(adev))
-               return smu_mode1_reset_is_support(smu);
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               support_mode1_reset = smu_mode1_reset_is_support(smu);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
-       return false;
+       return support_mode1_reset;
 }
 
 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret = -EOPNOTSUPP;
 
-       if (is_support_sw_smu(adev))
-               return smu_mode1_reset(smu);
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               ret = smu_mode1_reset(smu);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
-       return -EOPNOTSUPP;
+       return ret;
 }
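
Since amdgpu_dpm_mode1_reset() returns -EOPNOTSUPP unless a software SMU is present, a caller would normally gate it on the capability check above. A short hypothetical sketch:

/* Hypothetical helper: probe for mode1 support before resetting. */
static int example_try_mode1_reset(struct amdgpu_device *adev)
{
	if (!amdgpu_dpm_is_mode1_reset_supported(adev))
		return -EOPNOTSUPP;

	return amdgpu_dpm_mode1_reset(adev);
}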
 
 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
@@ -1115,9 +264,12 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       if (pp_funcs && pp_funcs->switch_power_profile)
+       if (pp_funcs && pp_funcs->switch_power_profile) {
+               mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->switch_power_profile(
                        adev->powerplay.pp_handle, type, en);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
@@ -1128,9 +280,12 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;
 
-       if (pp_funcs && pp_funcs->set_xgmi_pstate)
+       if (pp_funcs && pp_funcs->set_xgmi_pstate) {
+               mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
                                                                pstate);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
@@ -1142,20 +297,27 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
 
-       if (pp_funcs && pp_funcs->set_df_cstate)
+       if (pp_funcs && pp_funcs->set_df_cstate) {
+               mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
 
 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret = 0;
 
-       if (is_support_sw_smu(adev))
-               return smu_allow_xgmi_power_down(smu, en);
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               ret = smu_allow_xgmi_power_down(smu, en);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
-       return 0;
+       return ret;
 }
 
 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
@@ -1165,8 +327,11 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
                        adev->powerplay.pp_funcs;
        int ret = 0;
 
-       if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
+       if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
+               mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
@@ -1179,9 +344,12 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
                        adev->powerplay.pp_funcs;
        int ret = 0;
 
-       if (pp_funcs && pp_funcs->set_clockgating_by_smu)
+       if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
+               mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->set_clockgating_by_smu(pp_handle,
                                                       msg_id);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
@@ -1194,9 +362,12 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
                        adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;
 
-       if (pp_funcs && pp_funcs->smu_i2c_bus_access)
+       if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
+               mutex_lock(&adev->pm.mutex);
                ret = pp_funcs->smu_i2c_bus_access(pp_handle,
                                                   acquire);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
@@ -1209,13 +380,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
                        adev->pm.ac_power = true;
                else
                        adev->pm.ac_power = false;
+
                if (adev->powerplay.pp_funcs &&
                    adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
-               mutex_unlock(&adev->pm.mutex);
 
                if (is_support_sw_smu(adev))
-                       smu_set_ac_dc(&adev->smu);
+                       smu_set_ac_dc(adev->powerplay.pp_handle);
+
+               mutex_unlock(&adev->pm.mutex);
        }
 }
 
@@ -1223,394 +396,1219 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
                           void *data, uint32_t *size)
 {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-       int ret = 0;
+       int ret = -EINVAL;
 
        if (!data || !size)
                return -EINVAL;
 
-       if (pp_funcs && pp_funcs->read_sensor)
-               ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
-                                                                   sensor, data, size);
-       else
-               ret = -EINVAL;
+       if (pp_funcs && pp_funcs->read_sensor) {
+               mutex_lock(&adev->pm.mutex);
+               ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
+                                           sensor,
+                                           data,
+                                           size);
+               mutex_unlock(&adev->pm.mutex);
+       }
 
        return ret;
 }
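
A typical caller passes a sensor id plus a buffer and its size, exactly as the thermal work handler removed just below used to do. A sketch modeled on that removed code:

/* Sketch of a read_sensor caller; mirrors the removed thermal handler. */
static int example_read_gpu_temp(struct amdgpu_device *adev, int *temp)
{
	uint32_t size = sizeof(*temp);

	return amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				      (void *)temp, &size);
}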
 
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 {
-       struct amdgpu_device *adev =
-               container_of(work, struct amdgpu_device,
-                            pm.dpm.thermal.work);
-       /* switch to the thermal state */
-       enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
-       int temp, size = sizeof(temp);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
        if (!adev->pm.dpm_enabled)
                return;
 
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
-                                   (void *)&temp, &size)) {
-               if (temp < adev->pm.dpm.thermal.min_temp)
-                       /* switch back the user state */
-                       dpm_state = adev->pm.dpm.user_state;
-       } else {
-               if (adev->pm.dpm.thermal.high_to_low)
-                       /* switch back the user state */
-                       dpm_state = adev->pm.dpm.user_state;
-       }
+       if (!pp_funcs->pm_compute_clocks)
+               return;
+
        mutex_lock(&adev->pm.mutex);
-       if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
-               adev->pm.dpm.thermal_active = true;
-       else
-               adev->pm.dpm.thermal_active = false;
-       adev->pm.dpm.state = dpm_state;
+       pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
        mutex_unlock(&adev->pm.mutex);
-
-       amdgpu_pm_compute_clocks(adev);
 }
 
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
-                                                    enum amd_pm_state_type dpm_state)
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-       int i;
-       struct amdgpu_ps *ps;
-       u32 ui_class;
-       bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
-
-       /* check if the vblank period is too short to adjust the mclk */
-       if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
-               if (amdgpu_dpm_vblank_too_short(adev))
-                       single_display = false;
-       }
-
-       /* certain older asics have a separate 3D performance state,
-        * so try that first if the user selected performance
-        */
-       if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
-               dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
-       /* balanced states don't exist at the moment */
-       if (dpm_state == POWER_STATE_TYPE_BALANCED)
-               dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
-       /* Pick the best power state based on current conditions */
-       for (i = 0; i < adev->pm.dpm.num_ps; i++) {
-               ps = &adev->pm.dpm.ps[i];
-               ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
-               switch (dpm_state) {
-               /* user states */
-               case POWER_STATE_TYPE_BATTERY:
-                       if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
-                               if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
-                                       if (single_display)
-                                               return ps;
-                               } else
-                                       return ps;
-                       }
-                       break;
-               case POWER_STATE_TYPE_BALANCED:
-                       if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
-                               if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
-                                       if (single_display)
-                                               return ps;
-                               } else
-                                       return ps;
-                       }
-                       break;
-               case POWER_STATE_TYPE_PERFORMANCE:
-                       if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
-                               if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
-                                       if (single_display)
-                                               return ps;
-                               } else
-                                       return ps;
-                       }
-                       break;
-               /* internal states */
-               case POWER_STATE_TYPE_INTERNAL_UVD:
-                       if (adev->pm.dpm.uvd_ps)
-                               return adev->pm.dpm.uvd_ps;
-                       else
-                               break;
-               case POWER_STATE_TYPE_INTERNAL_UVD_SD:
-                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_UVD_HD:
-                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
-                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
-                       if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_BOOT:
-                       return adev->pm.dpm.boot_ps;
-               case POWER_STATE_TYPE_INTERNAL_THERMAL:
-                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_ACPI:
-                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_ULV:
-                       if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
-                               return ps;
-                       break;
-               case POWER_STATE_TYPE_INTERNAL_3DPERF:
-                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
-                               return ps;
-                       break;
-               default:
-                       break;
-               }
-       }
-       /* use a fallback state if we didn't match */
-       switch (dpm_state) {
-       case POWER_STATE_TYPE_INTERNAL_UVD_SD:
-               dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
-               goto restart_search;
-       case POWER_STATE_TYPE_INTERNAL_UVD_HD:
-       case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
-       case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
-               if (adev->pm.dpm.uvd_ps) {
-                       return adev->pm.dpm.uvd_ps;
-               } else {
-                       dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-                       goto restart_search;
-               }
-       case POWER_STATE_TYPE_INTERNAL_THERMAL:
-               dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
-               goto restart_search;
-       case POWER_STATE_TYPE_INTERNAL_ACPI:
-               dpm_state = POWER_STATE_TYPE_BATTERY;
-               goto restart_search;
-       case POWER_STATE_TYPE_BATTERY:
-       case POWER_STATE_TYPE_BALANCED:
-       case POWER_STATE_TYPE_INTERNAL_3DPERF:
-               dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-               goto restart_search;
-       default:
-               break;
-       }
+       int ret = 0;
 
-       return NULL;
+       ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+       if (ret)
+               DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+                         enable ? "enable" : "disable", ret);
 }
 
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-       struct amdgpu_ps *ps;
-       enum amd_pm_state_type dpm_state;
-       int ret;
-       bool equal = false;
-
-       /* if dpm init failed */
-       if (!adev->pm.dpm_enabled)
-               return;
+       int ret = 0;
 
-       if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
-               /* add other state override checks here */
-               if ((!adev->pm.dpm.thermal_active) &&
-                   (!adev->pm.dpm.uvd_active))
-                       adev->pm.dpm.state = adev->pm.dpm.user_state;
-       }
-       dpm_state = adev->pm.dpm.state;
+       ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+       if (ret)
+               DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+                         enable ? "enable" : "disable", ret);
+}
 
-       ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
-       if (ps)
-               adev->pm.dpm.requested_ps = ps;
-       else
-               return;
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{
+       int ret = 0;
 
-       if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
-               printk("switching from power state:\n");
-               amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
-               printk("switching to power state:\n");
-               amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
-       }
+       ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+       if (ret)
+               DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+                         enable ? "enable" : "disable", ret);
+}
 
-       /* update whether vce is active */
-       ps->vce_active = adev->pm.dpm.vce_active;
-       if (adev->powerplay.pp_funcs->display_configuration_changed)
-               amdgpu_dpm_display_configuration_changed(adev);
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int r = 0;
 
-       ret = amdgpu_dpm_pre_set_power_state(adev);
-       if (ret)
-               return;
+       if (!pp_funcs || !pp_funcs->load_firmware)
+               return 0;
 
-       if (adev->powerplay.pp_funcs->check_state_equal) {
-               if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
-                       equal = false;
+       mutex_lock(&adev->pm.mutex);
+       r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
+       if (r) {
+               pr_err("smu firmware loading failed\n");
+               goto out;
        }
 
-       if (equal)
-               return;
+       if (smu_version)
+               *smu_version = adev->pm.fw_version;
 
-       amdgpu_dpm_set_power_state(adev);
-       amdgpu_dpm_post_set_power_state(adev);
-
-       adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-       adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
-       if (adev->powerplay.pp_funcs->force_performance_level) {
-               if (adev->pm.dpm.thermal_active) {
-                       enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
-                       /* force low perf level for thermal */
-                       amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
-                       /* save the user's level */
-                       adev->pm.dpm.forced_level = level;
-               } else {
-                       /* otherwise, user selected level */
-                       amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
-               }
-       }
+out:
+       mutex_unlock(&adev->pm.mutex);
+       return r;
 }
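
On success the helper mirrors the loaded version out of adev->pm.fw_version, so a non-NULL smu_version is just a convenience for the caller. A hypothetical init-time sketch:

/* Hypothetical caller: load SMU firmware and log the reported version. */
static int example_init_smu_fw(struct amdgpu_device *adev)
{
	uint32_t smu_version = 0;
	int r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	if (!r)
		dev_info(adev->dev, "SMU fw version: 0x%08x\n", smu_version);
	return r;
}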
 
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
 {
-       int i = 0;
-
-       if (!adev->pm.dpm_enabled)
-               return;
-
-       if (adev->mode_info.num_crtc)
-               amdgpu_display_bandwidth_update(adev);
-
-       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-               struct amdgpu_ring *ring = adev->rings[i];
-               if (ring && ring->sched.ready)
-                       amdgpu_fence_wait_empty(ring);
-       }
+       int ret = 0;
 
-       if (adev->powerplay.pp_funcs->dispatch_tasks) {
-               if (!amdgpu_device_has_dc_support(adev)) {
-                       mutex_lock(&adev->pm.mutex);
-                       amdgpu_dpm_get_active_displays(adev);
-                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
-                       adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
-                       adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
-                       /* we have issues with mclk switching with
-                        * refresh rates over 120 hz on the non-DC code.
-                        */
-                       if (adev->pm.pm_display_cfg.vrefresh > 120)
-                               adev->pm.pm_display_cfg.min_vblank_time = 0;
-                       if (adev->powerplay.pp_funcs->display_configuration_change)
-                               adev->powerplay.pp_funcs->display_configuration_change(
-                                                       adev->powerplay.pp_handle,
-                                                       &adev->pm.pm_display_cfg);
-                       mutex_unlock(&adev->pm.mutex);
-               }
-               amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
-       } else {
+       if (is_support_sw_smu(adev)) {
                mutex_lock(&adev->pm.mutex);
-               amdgpu_dpm_get_active_displays(adev);
-               amdgpu_dpm_change_power_state_locked(adev);
+               ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
+                                                enable);
                mutex_unlock(&adev->pm.mutex);
        }
+
+       return ret;
 }
 
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
 {
+       struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
-       if (adev->family == AMDGPU_FAMILY_SI) {
-               mutex_lock(&adev->pm.mutex);
-               if (enable) {
-                       adev->pm.dpm.uvd_active = true;
-                       adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-               } else {
-                       adev->pm.dpm.uvd_active = false;
-               }
-               mutex_unlock(&adev->pm.mutex);
-
-               amdgpu_pm_compute_clocks(adev);
-       } else {
-               ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
-               if (ret)
-                       DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
-                                 enable ? "enable" : "disable", ret);
-
-               /* enable/disable Low Memory PState for UVD (4k videos) */
-               if (adev->asic_type == CHIP_STONEY &&
-                       adev->uvd.decode_image_width >= WIDTH_4K) {
-                       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
-                       if (hwmgr && hwmgr->hwmgr_func &&
-                           hwmgr->hwmgr_func->update_nbdpm_pstate)
-                               hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
-                                                                      !enable,
-                                                                      true);
-               }
-       }
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_send_hbm_bad_pages_num(smu, size);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
 }
 
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
+                                 enum pp_clock_type type,
+                                 uint32_t *min,
+                                 uint32_t *max)
 {
        int ret = 0;
 
-       if (adev->family == AMDGPU_FAMILY_SI) {
-               mutex_lock(&adev->pm.mutex);
-               if (enable) {
-                       adev->pm.dpm.vce_active = true;
-                       /* XXX select vce level based on ring/task */
-                       adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
-               } else {
-                       adev->pm.dpm.vce_active = false;
-               }
-               mutex_unlock(&adev->pm.mutex);
+       if (type != PP_SCLK)
+               return -EINVAL;
 
-               amdgpu_pm_compute_clocks(adev);
-       } else {
-               ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
-               if (ret)
-                       DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
-                                 enable ? "enable" : "disable", ret);
-       }
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
+                                    SMU_SCLK,
+                                    min,
+                                    max);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
 }
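
As a usage sketch (hypothetical caller; note that only PP_SCLK is accepted and a SW SMU backend is required):

/* Hypothetical caller: query the supported SCLK window. */
uint32_t min_sclk, max_sclk;
int err;

err = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_sclk, &max_sclk);
if (!err)
	dev_info(adev->dev, "sclk range: %u..%u\n", min_sclk, max_sclk);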
 
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
+                                  enum pp_clock_type type,
+                                  uint32_t min,
+                                  uint32_t max)
 {
-       int i;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret = 0;
 
-       if (adev->powerplay.pp_funcs->print_power_state == NULL)
-               return;
+       if (type != PP_SCLK)
+               return -EINVAL;
 
-       for (i = 0; i < adev->pm.dpm.num_ps; i++)
-               amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
 
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_set_soft_freq_range(smu,
+                                     SMU_SCLK,
+                                     min,
+                                     max);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
 }
 
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
 {
+       struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
-       ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
-       if (ret)
-               DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
-                         enable ? "enable" : "disable", ret);
+       if (!is_support_sw_smu(adev))
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_write_watermarks_table(smu);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
 }
 
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
+                             enum smu_event_type event,
+                             uint64_t event_arg)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret = 0;
+
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_wait_for_event(smu, event, event_arg);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret = 0;
+
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = smu_get_status_gfxoff(smu, value);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+
+       if (!is_support_sw_smu(adev))
+               return 0;
+
+       return atomic64_read(&smu->throttle_int_counter);
+}
+
+/**
+ * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
+ * @adev: amdgpu_device pointer
+ * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
+ */
+void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
+                                enum gfx_change_state state)
 {
-       int r;
+       mutex_lock(&adev->pm.mutex);
+       if (adev->powerplay.pp_funcs &&
+           adev->powerplay.pp_funcs->gfx_state_change_set)
+               adev->powerplay.pp_funcs->gfx_state_change_set(
+                       adev->powerplay.pp_handle, state);
+       mutex_unlock(&adev->pm.mutex);
+}
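
Using the sGpuChangeState_* values referenced in the kernel-doc above, a D3-entry notification from a suspend path would be a one-liner (hypothetical call site, not part of the patch):

/* Hypothetical call site: notify the SMU of D3 entry on suspend. */
amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);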
+
+int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
+                           void *umc_ecc)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
+       return smu_get_ecc_info(smu, umc_ecc);
+}
+
+struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+                                                    uint32_t idx)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       struct amd_vce_state *vstate = NULL;
+
+       if (!pp_funcs->get_vce_clock_state)
+               return NULL;
+
+       mutex_lock(&adev->pm.mutex);
+       vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
+                                              idx);
+       mutex_unlock(&adev->pm.mutex);
+
+       return vstate;
+}
+
+void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
+                                       enum amd_pm_state_type *state)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       mutex_lock(&adev->pm.mutex);
+
+       if (!pp_funcs->get_current_power_state) {
+               *state = adev->pm.dpm.user_state;
+               goto out;
+       }
+
+       *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
+       if (*state < POWER_STATE_TYPE_DEFAULT ||
+           *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
+               *state = adev->pm.dpm.user_state;
+
+out:
+       mutex_unlock(&adev->pm.mutex);
+}
+
+void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
+                               enum amd_pm_state_type state)
+{
+       mutex_lock(&adev->pm.mutex);
+       adev->pm.dpm.user_state = state;
+       mutex_unlock(&adev->pm.mutex);
 
-       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
-               r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
-               if (r) {
-                       pr_err("smu firmware loading failed\n");
-                       return r;
+       if (is_support_sw_smu(adev))
+               return;
+
+       if (amdgpu_dpm_dispatch_task(adev,
+                                    AMD_PP_TASK_ENABLE_USER_STATE,
+                                    &state) == -EOPNOTSUPP)
+               amdgpu_dpm_compute_clocks(adev);
+}
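
The fallback chain here is deliberate: SW SMU parts only record the requested user state, powerplay parts dispatch AMD_PP_TASK_ENABLE_USER_STATE, and a legacy backend without a dispatch_tasks hook (amdgpu_dpm_dispatch_task() then returns -EOPNOTSUPP) falls back to recomputing clocks directly. A hypothetical caller:

/* Hypothetical caller: request the battery power state. */
amdgpu_dpm_set_power_state(adev, POWER_STATE_TYPE_BATTERY);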
+
+enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       enum amd_dpm_forced_level level;
+
+       mutex_lock(&adev->pm.mutex);
+       if (pp_funcs->get_performance_level)
+               level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
+       else
+               level = adev->pm.dpm.forced_level;
+       mutex_unlock(&adev->pm.mutex);
+
+       return level;
+}
+
+int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
+                                      enum amd_dpm_forced_level level)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       enum amd_dpm_forced_level current_level;
+       uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+                                       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+       if (!pp_funcs->force_performance_level)
+               return 0;
+
+       if (adev->pm.dpm.thermal_active)
+               return -EINVAL;
+
+       current_level = amdgpu_dpm_get_performance_level(adev);
+       if (current_level == level)
+               return 0;
+
+       if (adev->asic_type == CHIP_RAVEN) {
+               if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+                       if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+                           level == AMD_DPM_FORCED_LEVEL_MANUAL)
+                               amdgpu_gfx_off_ctrl(adev, false);
+                       else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
+                                level != AMD_DPM_FORCED_LEVEL_MANUAL)
+                               amdgpu_gfx_off_ctrl(adev, true);
                }
+       }
 
-               if (smu_version)
-                       *smu_version = adev->pm.fw_version;
+       if (!(current_level & profile_mode_mask) &&
+           (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+               return -EINVAL;
+
+       if (!(current_level & profile_mode_mask) &&
+             (level & profile_mode_mask)) {
+               /* enter UMD Pstate */
+               amdgpu_device_ip_set_powergating_state(adev,
+                                                      AMD_IP_BLOCK_TYPE_GFX,
+                                                      AMD_PG_STATE_UNGATE);
+               amdgpu_device_ip_set_clockgating_state(adev,
+                                                      AMD_IP_BLOCK_TYPE_GFX,
+                                                      AMD_CG_STATE_UNGATE);
+       } else if ((current_level & profile_mode_mask) &&
+                   !(level & profile_mode_mask)) {
+               /* exit UMD Pstate */
+               amdgpu_device_ip_set_clockgating_state(adev,
+                                                      AMD_IP_BLOCK_TYPE_GFX,
+                                                      AMD_CG_STATE_GATE);
+               amdgpu_device_ip_set_powergating_state(adev,
+                                                      AMD_IP_BLOCK_TYPE_GFX,
+                                                      AMD_PG_STATE_GATE);
+       }
+
+       mutex_lock(&adev->pm.mutex);
+
+       if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
+                                             level)) {
+               mutex_unlock(&adev->pm.mutex);
+               return -EINVAL;
        }
 
+       adev->pm.dpm.forced_level = level;
+
+       mutex_unlock(&adev->pm.mutex);
+
        return 0;
 }
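
Entering any of the profiling levels ungates GFX clock/power gating first so the UMD pstate stays stable; a hypothetical caller pinning clocks for a benchmark run might look like this:

/* Hypothetical caller: pin clocks to the peak profile for benchmarking. */
if (amdgpu_dpm_force_performance_level(adev,
				       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
	dev_warn(adev->dev, "failed to enter profile-peak level\n");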
+
+int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
+                                struct pp_states_info *states)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_pp_num_states)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
+                                         states);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
+                             enum amd_pp_task task_id,
+                             enum amd_pm_state_type *user_state)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->dispatch_tasks)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
+                                      task_id,
+                                      user_state);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_pp_table)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
+                                    table);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
+                                     uint32_t type,
+                                     long *input,
+                                     uint32_t size)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_fine_grain_clk_vol)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
+                                              type,
+                                              input,
+                                              size);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
+                                 uint32_t type,
+                                 long *input,
+                                 uint32_t size)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->odn_edit_dpm_table)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
+                                          type,
+                                          input,
+                                          size);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
+                                 enum pp_clock_type type,
+                                 char *buf)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->print_clock_levels)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
+                                          type,
+                                          buf);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
+                                 enum pp_clock_type type,
+                                 char *buf,
+                                 int *offset)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->emit_clock_levels)
+               return -ENOENT;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
+                                          type,
+                                          buf,
+                                          offset);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
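
Unlike its siblings, this wrapper returns -ENOENT rather than 0 when the hook is missing, so callers can distinguish "not implemented" from an empty result and fall back to print_clock_levels(), as the sysfs handler further down in this patch does. A sketch of that fallback:

/* Hypothetical caller: prefer emit_clock_levels, fall back to print. */
int offset = 0;

if (amdgpu_dpm_emit_clock_levels(adev, OD_SCLK, buf, &offset) == -ENOENT)
	offset = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);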
+
+int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
+                                   uint64_t ppfeature_masks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_ppfeature_status)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
+                                            ppfeature_masks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_ppfeature_status)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
+                                            buf);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
+                                enum pp_clock_type type,
+                                uint32_t mask)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->force_clock_level)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
+                                         type,
+                                         mask);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_sclk_od)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (is_support_sw_smu(adev))
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       if (pp_funcs->set_sclk_od)
+               pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
+       mutex_unlock(&adev->pm.mutex);
+
+       if (amdgpu_dpm_dispatch_task(adev,
+                                    AMD_PP_TASK_READJUST_POWER_STATE,
+                                    NULL) == -EOPNOTSUPP) {
+               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+               amdgpu_dpm_compute_clocks(adev);
+       }
+
+       return 0;
+}
+
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_mclk_od)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (is_support_sw_smu(adev))
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       if (pp_funcs->set_mclk_od)
+               pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
+       mutex_unlock(&adev->pm.mutex);
+
+       if (amdgpu_dpm_dispatch_task(adev,
+                                    AMD_PP_TASK_READJUST_POWER_STATE,
+                                    NULL) == -EOPNOTSUPP) {
+               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+               amdgpu_dpm_compute_clocks(adev);
+       }
+
+       return 0;
+}
+
+int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
+                                     char *buf)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_power_profile_mode)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
+                                              buf);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
+                                     long *input, uint32_t size)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_power_profile_mode)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
+                                              input,
+                                              size);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_gpu_metrics)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
+                                       table);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
+                                   uint32_t *fan_mode)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_fan_control_mode)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
+                                            fan_mode);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
+                                uint32_t speed)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_fan_speed_pwm)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
+                                         speed);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
+                                uint32_t *speed)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_fan_speed_pwm)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
+                                         speed);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
+                                uint32_t *speed)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_fan_speed_rpm)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
+                                         speed);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
+                                uint32_t speed)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_fan_speed_rpm)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
+                                         speed);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
+                                   uint32_t mode)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_fan_control_mode)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
+                                            mode);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
+                              uint32_t *limit,
+                              enum pp_power_limit_level pp_limit_level,
+                              enum pp_power_type power_type)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_power_limit)
+               return -ENODATA;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+                                       limit,
+                                       pp_limit_level,
+                                       power_type);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+                              uint32_t limit)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_power_limit)
+               return -EINVAL;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
+                                       limit);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
+{
+       bool cclk_dpm_supported = false;
+
+       if (!is_support_sw_smu(adev))
+               return false;
+
+       mutex_lock(&adev->pm.mutex);
+       cclk_dpm_supported = is_support_cclk_dpm(adev);
+       mutex_unlock(&adev->pm.mutex);
+
+       return (int)cclk_dpm_supported;
+}
+
+int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+                                                      struct seq_file *m)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs->debugfs_print_current_performance_level)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
+                                                         m);
+       mutex_unlock(&adev->pm.mutex);
+
+       return 0;
+}
+
+int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+                                      void **addr,
+                                      size_t *size)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_smu_prv_buf_details)
+               return -ENOSYS;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
+                                               addr,
+                                               size);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
+{
+       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+
+       if ((is_support_sw_smu(adev) && smu->od_enabled) ||
+           (is_support_sw_smu(adev) && smu->is_apu) ||
+           (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+               return true;
+
+       return false;
+}
+
+int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+                           const char *buf,
+                           size_t size)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_pp_table)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
+                                    buf,
+                                    size);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
+{
+       struct smu_context *smu = adev->powerplay.pp_handle;
+
+       if (!is_support_sw_smu(adev))
+               return INT_MAX;
+
+       return smu->cpu_core_num;
+}
+
+void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
+{
+       if (!is_support_sw_smu(adev))
+               return;
+
+       amdgpu_smu_stb_debug_fs_init(adev);
+}
+
+int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
+                                           const struct amd_pp_display_configuration *input)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->display_configuration_change)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
+                                                    input);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
+                                enum amd_pp_clock_type type,
+                                struct amd_pp_clocks *clocks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_clock_by_type)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
+                                         type,
+                                         clocks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
+                                               struct amd_pp_simple_clock_info *clocks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_display_mode_validation_clocks)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
+                                                          clocks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
+                                             enum amd_pp_clock_type type,
+                                             struct pp_clock_levels_with_latency *clocks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_clock_by_type_with_latency)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
+                                                      type,
+                                                      clocks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
+                                             enum amd_pp_clock_type type,
+                                             struct pp_clock_levels_with_voltage *clocks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_clock_by_type_with_voltage)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
+                                                      type,
+                                                      clocks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
+                                              void *clock_ranges)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_watermarks_for_clocks_ranges)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
+                                                        clock_ranges);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
+                                            struct pp_display_clock_request *clock)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->display_clock_voltage_request)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
+                                                     clock);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
+                                 struct amd_pp_clock_info *clocks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_current_clocks)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
+                                          clocks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs->notify_smu_enable_pwe)
+               return;
+
+       mutex_lock(&adev->pm.mutex);
+       pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
+       mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
+                                       uint32_t count)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_active_display_count)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
+                                                count);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
+                                         uint32_t clock)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->set_min_deep_sleep_dcefclk)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
+                                                  clock);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
+                                            uint32_t clock)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs->set_hard_min_dcefclk_by_freq)
+               return;
+
+       mutex_lock(&adev->pm.mutex);
+       pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
+                                              clock);
+       mutex_unlock(&adev->pm.mutex);
+}
+
+void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
+                                         uint32_t clock)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+       if (!pp_funcs->set_hard_min_fclk_by_freq)
+               return;
+
+       mutex_lock(&adev->pm.mutex);
+       pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
+                                           clock);
+       mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
+                                                  bool disable_memory_clock_switch)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->display_disable_memory_clock_switch)
+               return 0;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
+                                                           disable_memory_clock_switch);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
+                                               struct pp_smu_nv_clock_table *max_clocks)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_max_sustainable_clocks_by_dc)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
+                                                        max_clocks);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
+                                                 unsigned int *clock_values_in_khz,
+                                                 unsigned int *num_states)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_uclk_dpm_states)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
+                                           clock_values_in_khz,
+                                           num_states);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
+
+int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
+                                  struct dpm_clocks *clock_table)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int ret = 0;
+
+       if (!pp_funcs->get_dpm_clock_table)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&adev->pm.mutex);
+       ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
+                                           clock_table);
+       mutex_unlock(&adev->pm.mutex);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
new file mode 100644
index 0000000..42efe83
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "hwmgr.h"
+#include "amdgpu_smu.h"
+#include "amdgpu_dpm_internal.h"
+
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+       struct drm_device *ddev = adev_to_drm(adev);
+       struct drm_crtc *crtc;
+       struct amdgpu_crtc *amdgpu_crtc;
+
+       adev->pm.dpm.new_active_crtcs = 0;
+       adev->pm.dpm.new_active_crtc_count = 0;
+       if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc,
+                                   &ddev->mode_config.crtc_list, head) {
+                       amdgpu_crtc = to_amdgpu_crtc(crtc);
+                       if (amdgpu_crtc->enabled) {
+                               adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+                               adev->pm.dpm.new_active_crtc_count++;
+                       }
+               }
+       }
+}
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_crtc *crtc;
+       struct amdgpu_crtc *amdgpu_crtc;
+       u32 vblank_in_pixels;
+       u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+       if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       amdgpu_crtc = to_amdgpu_crtc(crtc);
+                       if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+                               vblank_in_pixels =
+                                       amdgpu_crtc->hw_mode.crtc_htotal *
+                                       (amdgpu_crtc->hw_mode.crtc_vblank_end -
+                                       amdgpu_crtc->hw_mode.crtc_vdisplay +
+                                       (amdgpu_crtc->v_border * 2));
+
+                               vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+                               break;
+                       }
+               }
+       }
+
+       return vblank_time_us;
+}
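
A worked instance of the conversion above, with illustrative CEA 1080p60 numbers:

/*
 * Illustrative numbers only (CEA 1080p60):
 *   htotal = 2200, vblank = 1125 - 1080 = 45 lines, clock = 148500 kHz
 *   vblank_in_pixels = 2200 * 45             = 99000
 *   vblank_time_us   = 99000 * 1000 / 148500 = 666 us
 */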
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_crtc *crtc;
+       struct amdgpu_crtc *amdgpu_crtc;
+       u32 vrefresh = 0;
+
+       if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                       amdgpu_crtc = to_amdgpu_crtc(crtc);
+                       if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+                               vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+                               break;
+                       }
+               }
+       }
+
+       return vrefresh;
+}
index e2cae97f4ff134c3c45a86914cc01082f682c232..b0243068212b0a0a42639b5c4afaeb4469dba503 100644
@@ -34,7 +34,6 @@
 #include <linux/nospec.h>
 #include <linux/pm_runtime.h>
 #include <asm/processor.h>
-#include "hwmgr.h"
 
 static const struct cg_flag_name clocks[] = {
        {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
@@ -132,7 +131,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_pm_state_type pm;
        int ret;
 
@@ -147,11 +145,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
                return ret;
        }
 
-       if (pp_funcs->get_current_power_state) {
-               pm = amdgpu_dpm_get_current_power_state(adev);
-       } else {
-               pm = adev->pm.dpm.user_state;
-       }
+       amdgpu_dpm_get_current_power_state(adev, &pm);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -191,19 +185,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               mutex_lock(&adev->pm.mutex);
-               adev->pm.dpm.user_state = state;
-               mutex_unlock(&adev->pm.mutex);
-       } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
-               amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
-       } else {
-               mutex_lock(&adev->pm.mutex);
-               adev->pm.dpm.user_state = state;
-               mutex_unlock(&adev->pm.mutex);
+       amdgpu_dpm_set_power_state(adev, state);
 
-               amdgpu_pm_compute_clocks(adev);
-       }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
@@ -290,10 +273,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->get_performance_level)
-               level = amdgpu_dpm_get_performance_level(adev);
-       else
-               level = adev->pm.dpm.forced_level;
+       level = amdgpu_dpm_get_performance_level(adev);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -318,9 +298,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level level;
-       enum amd_dpm_forced_level current_level;
        int ret = 0;
 
        if (amdgpu_in_reset(adev))
@@ -358,57 +336,17 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                return ret;
        }
 
-       if (pp_funcs->get_performance_level)
-               current_level = amdgpu_dpm_get_performance_level(adev);
-       else
-               current_level = adev->pm.dpm.forced_level;
-
-       if (current_level == level) {
-               pm_runtime_mark_last_busy(ddev->dev);
-               pm_runtime_put_autosuspend(ddev->dev);
-               return count;
-       }
-
-       if (adev->asic_type == CHIP_RAVEN) {
-               if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
-                       if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
-                               amdgpu_gfx_off_ctrl(adev, false);
-                       else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
-                               amdgpu_gfx_off_ctrl(adev, true);
-               }
-       }
-
-       /* profile_exit setting is valid only when current mode is in profile mode */
-       if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
-           AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
-           AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
-           AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
-           (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
-               pr_err("Currently not in any profile mode!\n");
+       mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+       if (amdgpu_dpm_force_performance_level(adev, level)) {
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
+               mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
                return -EINVAL;
        }
+       /* override whatever a user ctx may have set */
+       adev->pm.stable_pstate_ctx = NULL;
+       mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
 
-       if (pp_funcs->force_performance_level) {
-               mutex_lock(&adev->pm.mutex);
-               if (adev->pm.dpm.thermal_active) {
-                       mutex_unlock(&adev->pm.mutex);
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-               ret = amdgpu_dpm_force_performance_level(adev, level);
-               if (ret) {
-                       mutex_unlock(&adev->pm.mutex);
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               } else {
-                       adev->pm.dpm.forced_level = level;
-               }
-               mutex_unlock(&adev->pm.mutex);
-       }
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
@@ -421,7 +359,6 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_states_info data;
        uint32_t i;
        int buf_len, ret;
@@ -437,11 +374,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                return ret;
        }
 
-       if (pp_funcs->get_pp_num_states) {
-               amdgpu_dpm_get_pp_num_states(adev, &data);
-       } else {
+       if (amdgpu_dpm_get_pp_num_states(adev, &data))
                memset(&data, 0, sizeof(data));
-       }
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -463,7 +397,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_states_info data = {0};
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;
@@ -479,15 +412,16 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
                return ret;
        }
 
-       if (pp_funcs->get_current_power_state
-                && pp_funcs->get_pp_num_states) {
-               pm = amdgpu_dpm_get_current_power_state(adev);
-               amdgpu_dpm_get_pp_num_states(adev, &data);
-       }
+       amdgpu_dpm_get_current_power_state(adev, &pm);
+
+       ret = amdgpu_dpm_get_pp_num_states(adev, &data);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
+       if (ret)
+               return ret;
+
        for (i = 0; i < data.nums; i++) {
                if (pm == data.states[i])
                        break;
@@ -511,7 +445,7 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;
 
-       if (adev->pp_force_state_enabled)
+       if (adev->pm.pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
                return sysfs_emit(buf, "\n");
@@ -525,6 +459,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type state = 0;
+       struct pp_states_info data;
        unsigned long idx;
        int ret;
 
@@ -533,41 +468,49 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;
 
-       if (strlen(buf) == 1)
-               adev->pp_force_state_enabled = false;
-       else if (is_support_sw_smu(adev))
-               adev->pp_force_state_enabled = false;
-       else if (adev->powerplay.pp_funcs->dispatch_tasks &&
-                       adev->powerplay.pp_funcs->get_pp_num_states) {
-               struct pp_states_info data;
-
-               ret = kstrtoul(buf, 0, &idx);
-               if (ret || idx >= ARRAY_SIZE(data.states))
-                       return -EINVAL;
+       adev->pm.pp_force_state_enabled = false;
 
-               idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
+       if (strlen(buf) == 1)
+               return count;
 
-               amdgpu_dpm_get_pp_num_states(adev, &data);
-               state = data.states[idx];
+       ret = kstrtoul(buf, 0, &idx);
+       if (ret || idx >= ARRAY_SIZE(data.states))
+               return -EINVAL;
 
-               ret = pm_runtime_get_sync(ddev->dev);
-               if (ret < 0) {
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return ret;
-               }
+       idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
 
-               /* only set user selected power states */
-               if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
-                   state != POWER_STATE_TYPE_DEFAULT) {
-                       amdgpu_dpm_dispatch_task(adev,
-                                       AMD_PP_TASK_ENABLE_USER_STATE, &state);
-                       adev->pp_force_state_enabled = true;
-               }
-               pm_runtime_mark_last_busy(ddev->dev);
+       ret = pm_runtime_get_sync(ddev->dev);
+       if (ret < 0) {
                pm_runtime_put_autosuspend(ddev->dev);
+               return ret;
+       }
+
+       ret = amdgpu_dpm_get_pp_num_states(adev, &data);
+       if (ret)
+               goto err_out;
+
+       state = data.states[idx];
+
+       /* only set user selected power states */
+       if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+           state != POWER_STATE_TYPE_DEFAULT) {
+               ret = amdgpu_dpm_dispatch_task(adev,
+                               AMD_PP_TASK_ENABLE_USER_STATE, &state);
+               if (ret)
+                       goto err_out;
+
+               adev->pm.pp_force_state_enabled = true;
        }
 
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+
        return count;
+
+err_out:
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+       return ret;
 }
 
 /**
@@ -601,17 +544,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->get_pp_table) {
-               size = amdgpu_dpm_get_pp_table(adev, &table);
-               pm_runtime_mark_last_busy(ddev->dev);
-               pm_runtime_put_autosuspend(ddev->dev);
-               if (size < 0)
-                       return size;
-       } else {
-               pm_runtime_mark_last_busy(ddev->dev);
-               pm_runtime_put_autosuspend(ddev->dev);
-               return 0;
-       }
+       size = amdgpu_dpm_get_pp_table(adev, &table);
+
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+
+       if (size <= 0)
+               return size;
 
        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;
@@ -642,15 +581,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
        }
 
        ret = amdgpu_dpm_set_pp_table(adev, buf, count);
-       if (ret) {
-               pm_runtime_mark_last_busy(ddev->dev);
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
+       if (ret)
+               return ret;
+
        return count;
 }
 
@@ -866,46 +803,32 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
-               ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
-                                                       parameter,
-                                                       parameter_size);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       }
+       if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
+                                             type,
+                                             parameter,
+                                             parameter_size))
+               goto err_out;
 
-       if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
-               ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
-                                                   parameter, parameter_size);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       }
+       if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
+                                         parameter, parameter_size))
+               goto err_out;
 
        if (type == PP_OD_COMMIT_DPM_TABLE) {
-               if (adev->powerplay.pp_funcs->dispatch_tasks) {
-                       amdgpu_dpm_dispatch_task(adev,
-                                                AMD_PP_TASK_READJUST_POWER_STATE,
-                                                NULL);
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return count;
-               } else {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
+               if (amdgpu_dpm_dispatch_task(adev,
+                                            AMD_PP_TASK_READJUST_POWER_STATE,
+                                            NULL))
+                       goto err_out;
        }
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
        return count;
+
+err_out:
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+       return -EINVAL;
 }
 
 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
@@ -914,8 +837,17 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
+       int size = 0;
        int ret;
+       enum pp_clock_type od_clocks[6] = {
+               OD_SCLK,
+               OD_MCLK,
+               OD_VDDC_CURVE,
+               OD_RANGE,
+               OD_VDDGFX_OFFSET,
+               OD_CCLK,
+       };
+       uint clk_index;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
@@ -928,16 +860,25 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->print_clock_levels) {
+       for (clk_index = 0; clk_index < 6; clk_index++) {
+               ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
+               if (ret)
+                       break;
+       }
+       if (ret == -ENOENT) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
-               size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
-               size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
-               size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
-               size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
-               size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
-       } else {
-               size = sysfs_emit(buf, "\n");
+               if (size > 0) {
+                       size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+                       size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+                       size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+                       size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+                       size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
+               }
        }
+
+       if (size == 0)
+               size = sysfs_emit(buf, "\n");
+
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
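
The OD handler above is the first user of the new emit interface: amdgpu_dpm_emit_clock_levels() appends formatted levels at *size and advances it, and is expected to return -ENOENT when the backend only implements the older print_clock_levels() callback. A condensed sketch of the try-emit-then-fall-back idiom under those assumed semantics (runtime-PM bracketing omitted for brevity):

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(dev));
        int size = 0, ret;

        /* preferred: offset-based API, safe to call repeatedly on one buf */
        ret = amdgpu_dpm_emit_clock_levels(adev, OD_SCLK, buf, &size);
        if (ret == -ENOENT)
                /* legacy backend: print API returns bytes written instead */
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);

        if (size == 0)
                size = sysfs_emit(buf, "\n");   /* nothing to report */

        return size;
}
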
@@ -985,17 +926,14 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->set_ppfeature_status) {
-               ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       }
+       ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
+
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
+       if (ret)
+               return -EINVAL;
+
        return count;
 }
 
@@ -1019,9 +957,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->get_ppfeature_status)
-               size = amdgpu_dpm_get_ppfeature_status(adev, buf);
-       else
+       size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+       if (size <= 0)
                size = sysfs_emit(buf, "\n");
 
        pm_runtime_mark_last_busy(ddev->dev);
@@ -1066,8 +1003,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
+       int size = 0;
+       int ret = 0;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
@@ -1080,9 +1017,11 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->print_clock_levels)
+       ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
+       if (ret == -ENOENT)
                size = amdgpu_dpm_print_clock_levels(adev, type, buf);
-       else
+
+       if (size == 0)
                size = sysfs_emit(buf, "\n");
 
        pm_runtime_mark_last_busy(ddev->dev);
@@ -1151,10 +1090,7 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, type, mask);
-       else
-               ret = 0;
+       ret = amdgpu_dpm_force_clock_level(adev, type, mask);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1305,10 +1241,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               value = 0;
-       else if (adev->powerplay.pp_funcs->get_sclk_od)
-               value = amdgpu_dpm_get_sclk_od(adev);
+       value = amdgpu_dpm_get_sclk_od(adev);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1342,19 +1275,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               value = 0;
-       } else {
-               if (adev->powerplay.pp_funcs->set_sclk_od)
-                       amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-
-               if (adev->powerplay.pp_funcs->dispatch_tasks) {
-                       amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
-               } else {
-                       adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
-                       amdgpu_pm_compute_clocks(adev);
-               }
-       }
+       amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1382,10 +1303,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               value = 0;
-       else if (adev->powerplay.pp_funcs->get_mclk_od)
-               value = amdgpu_dpm_get_mclk_od(adev);
+       value = amdgpu_dpm_get_mclk_od(adev);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1419,19 +1337,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               value = 0;
-       } else {
-               if (adev->powerplay.pp_funcs->set_mclk_od)
-                       amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-
-               if (adev->powerplay.pp_funcs->dispatch_tasks) {
-                       amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
-               } else {
-                       adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
-                       amdgpu_pm_compute_clocks(adev);
-               }
-       }
+       amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1479,9 +1385,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->get_power_profile_mode)
-               size = amdgpu_dpm_get_power_profile_mode(adev, buf);
-       else
+       size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+       if (size <= 0)
                size = sysfs_emit(buf, "\n");
 
        pm_runtime_mark_last_busy(ddev->dev);
@@ -1545,8 +1450,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->set_power_profile_mode)
-               ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+       ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1812,9 +1716,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
                return ret;
        }
 
-       if (adev->powerplay.pp_funcs->get_gpu_metrics)
-               size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
-
+       size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
        if (size <= 0)
                goto out;
 
@@ -2027,8 +1929,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
        AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-       AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,                           ATTR_FLAG_BASIC),
-       AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,                              ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,                           ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+       AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,                              ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,                               ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,                               ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,                    ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2053,7 +1955,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 {
        struct device_attribute *dev_attr = &attr->dev_attr;
        const char *attr_name = dev_attr->attr.name;
-       struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
        enum amd_asic_type asic_type = adev->asic_type;
 
        if (!(attr->flags & mask)) {
@@ -2076,9 +1977,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
                *states = ATTR_STATE_UNSUPPORTED;
-               if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
-                   (is_support_sw_smu(adev) && adev->smu.is_apu) ||
-                       (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+               if (amdgpu_dpm_is_overdrive_supported(adev))
                        *states = ATTR_STATE_SUPPORTED;
        } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
                if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2106,8 +2005,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
-               if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
-                   amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+               if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
                        *states = ATTR_STATE_UNSUPPORTED;
        }
 
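
Note how attribute support is now probed through the wrappers themselves rather than by peeking at pp_funcs: passing a NULL buffer to amdgpu_dpm_get_power_profile_mode() is assumed to return -EOPNOTSUPP when the callback is absent, without formatting anything. A sketch of that probe idiom (the helper name is illustrative):

static bool pp_profile_mode_supported(struct amdgpu_device *adev)
{
        /*
         * NULL buf: the wrapper is assumed to bail out with -EOPNOTSUPP
         * before touching the buffer when no backend callback exists;
         * any other return value means the attribute can be exposed.
         */
        return amdgpu_dpm_get_power_profile_mode(adev, NULL) != -EOPNOTSUPP;
}
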
@@ -2396,17 +2294,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
                return ret;
        }
 
-       if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return -EINVAL;
-       }
-
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
+       if (ret)
+               return -EINVAL;
+
        return sysfs_emit(buf, "%u\n", pwm_mode);
 }
 
@@ -2434,17 +2329,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
                return ret;
        }
 
-       if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return -EINVAL;
-       }
-
-       amdgpu_dpm_set_fan_control_mode(adev, value);
+       ret = amdgpu_dpm_set_fan_control_mode(adev, value);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
+       if (ret)
+               return -EINVAL;
+
        return count;
 }
 
@@ -2476,32 +2368,29 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;
 
+       err = kstrtou32(buf, 10, &value);
+       if (err)
+               return err;
+
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
+       if (err)
+               goto out;
+
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pr_info("manual fan speed control should be enabled first\n");
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return -EINVAL;
-       }
-
-       err = kstrtou32(buf, 10, &value);
-       if (err) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return err;
+               err = -EINVAL;
+               goto out;
        }
 
-       if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
-               err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
-       else
-               err = -EINVAL;
+       err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
 
+out:
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
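
Two improvements land together in set_pwm1: kstrtou32() moves ahead of pm_runtime_get_sync() so malformed input no longer wakes the GPU, and the remaining failure paths share one out label. A condensed sketch of the reordered flow (the manual-mode check is dropped here for brevity):

static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        u32 value;
        int err;

        err = kstrtou32(buf, 10, &value);       /* validate before waking GPU */
        if (err)
                return err;

        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }

        err = amdgpu_dpm_set_fan_speed_pwm(adev, value);

        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

        return err ? err : count;
}
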
@@ -2530,10 +2419,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
                return err;
        }
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
-               err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
-       else
-               err = -EINVAL;
+       err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2563,10 +2449,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
                return err;
        }
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
-               err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
-       else
-               err = -EINVAL;
+       err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2660,10 +2543,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
                return err;
        }
 
-       if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
-               err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
-       else
-               err = -EINVAL;
+       err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2688,32 +2568,28 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;
 
+       err = kstrtou32(buf, 10, &value);
+       if (err)
+               return err;
+
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return err;
        }
 
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
+       if (err)
+               goto out;
 
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return -ENODATA;
-       }
-
-       err = kstrtou32(buf, 10, &value);
-       if (err) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return err;
+               err = -ENODATA;
+               goto out;
        }
 
-       if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
-               err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
-       else
-               err = -EINVAL;
+       err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
 
+out:
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
@@ -2742,17 +2618,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
                return ret;
        }
 
-       if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return -EINVAL;
-       }
-
-       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
+       if (ret)
+               return -EINVAL;
+
        return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
 }
 
@@ -2788,16 +2661,14 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
                return err;
        }
 
-       if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return -EINVAL;
-       }
-       amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+       err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
+       if (err)
+               return -EINVAL;
+
        return count;
 }
 
@@ -2933,7 +2804,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
                                        enum pp_power_limit_level pp_limit_level)
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
        uint32_t limit;
        ssize_t size;
@@ -2944,16 +2814,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;
 
-       if ( !(pp_funcs && pp_funcs->get_power_limit))
-               return -ENODATA;
-
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
                return r;
        }
 
-       r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
+       r = amdgpu_dpm_get_power_limit(adev, &limit,
                                      pp_limit_level, power_type);
 
        if (!r)
@@ -2996,10 +2863,14 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
-       int limit_type = to_sensor_dev_attr(attr)->index;
+       struct amdgpu_device *adev = dev_get_drvdata(dev);
 
-       return sysfs_emit(buf, "%s\n",
-               limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+       if (adev->asic_type == CHIP_VANGOGH)
+               return sysfs_emit(buf, "%s\n",
+                                 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
+                                 "fastPPT" : "slowPPT");
+       else
+               return sysfs_emit(buf, "PPT\n");
 }
 
 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
@@ -3008,7 +2879,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
                size_t count)
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int limit_type = to_sensor_dev_attr(attr)->index;
        int err;
        u32 value;
@@ -3034,10 +2904,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
                return err;
        }
 
-       if (pp_funcs && pp_funcs->set_power_limit)
-               err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
-       else
-               err = -EINVAL;
+       err = amdgpu_dpm_set_power_limit(adev, value);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -3315,19 +3182,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
 
-       /* there is no fan under pp one vf mode */
-       if (amdgpu_sriov_is_pp_one_vf(adev) &&
-           (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
-            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
-            attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
-            attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
-            attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
-            attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
-            attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
-            attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
-               return 0;
-
        /* Skip fan attributes if fan is not present */
        if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
@@ -3374,20 +3228,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
                return 0;
 
-       if (!is_support_sw_smu(adev)) {
-               /* mask fan attributes if we have no bindings for this asic to expose */
-               if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
-                    attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
-                   (!adev->powerplay.pp_funcs->get_fan_control_mode &&
-                    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
-                       effective_mode &= ~S_IRUGO;
+       /* mask fan attributes if we have no bindings for this asic to expose */
+       if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
+             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+           ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
+            attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+               effective_mode &= ~S_IRUGO;
 
-               if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
-                    attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
-                   (!adev->powerplay.pp_funcs->set_fan_control_mode &&
-                    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
-                       effective_mode &= ~S_IWUSR;
-       }
+       if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
+             attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+             ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
+             attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+               effective_mode &= ~S_IWUSR;
 
        if (((adev->family == AMDGPU_FAMILY_SI) ||
                 ((adev->flags & AMD_IS_APU) &&
@@ -3404,22 +3256,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
            (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
                return 0;
 
-       if (!is_support_sw_smu(adev)) {
-               /* hide max/min values if we can't both query and manage the fan */
-               if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
-                    !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
-                    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
-                    !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
-                   (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-                    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
-                       return 0;
+       /* hide max/min values if we can't both query and manage the fan */
+       if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
+             (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
+             (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
+             (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
+           (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+            attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+               return 0;
 
-               if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
-                    !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
-                   (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
-                    attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
-                       return 0;
-       }
+       if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
+            (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
+            (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+            attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+               return 0;
 
        if ((adev->family == AMDGPU_FAMILY_SI ||        /* not implemented yet */
             adev->family == AMDGPU_FAMILY_KV) &&       /* not implemented yet */
@@ -3462,8 +3312,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
             attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
                 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
                 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
-                attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
-                attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+                attr == &sensor_dev_attr_power2_label.dev_attr.attr))
                return 0;
 
        return effective_mode;
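
The setter probes above pass U32_MAX, a value that is never a legal fan speed; the working assumption is that a wrapper with no backend callback returns -EOPNOTSUPP before the argument is ever interpreted, so the call is safe to issue at attribute-registration time. A sketch of the idiom (helper name illustrative):

static bool fan_is_manageable(struct amdgpu_device *adev)
{
        /*
         * U32_MAX is a sentinel: only a missing callback yields
         * -EOPNOTSUPP, so these probes never actually program the fan.
         */
        return amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) != -EOPNOTSUPP ||
               amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) != -EOPNOTSUPP;
}
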
@@ -3549,14 +3398,15 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
        uint16_t *p_val;
        uint32_t size;
        int i;
+       uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
 
-       if (is_support_cclk_dpm(adev)) {
-               p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+       if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
+               p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
                                GFP_KERNEL);
 
                if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
                                            (void *)p_val, &size)) {
-                       for (i = 0; i < adev->smu.cpu_core_num; i++)
+                       for (i = 0; i < num_cpu_cores; i++)
                                seq_printf(m, "\t%u MHz (CPU%d)\n",
                                           *(p_val + i), i);
                }
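
The per-core clock dump now derives its element count from amdgpu_dpm_get_num_cpu_cores() instead of reaching into adev->smu. A self-contained sketch of the same read, adding the kcalloc NULL check the hunk above omits:

static void example_print_cpu_clocks(struct amdgpu_device *adev,
                                     struct seq_file *m)
{
        uint32_t n = amdgpu_dpm_get_num_cpu_cores(adev);
        uint32_t size = n * sizeof(uint16_t);
        uint16_t *vals;
        uint32_t i;

        vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
        if (!vals)
                return;

        /* one AMDGPU_PP_SENSOR_CPU_CLK query fills all cores at once */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
                                    vals, &size))
                for (i = 0; i < n; i++)
                        seq_printf(m, "\t%u MHz (CPU%d)\n", vals[i], i);

        kfree(vals);
}
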
@@ -3684,27 +3534,11 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
                return r;
        }
 
-       if (!adev->pm.dpm_enabled) {
-               seq_printf(m, "dpm not enabled\n");
-               pm_runtime_mark_last_busy(dev->dev);
-               pm_runtime_put_autosuspend(dev->dev);
-               return 0;
-       }
-
-       if (!is_support_sw_smu(adev) &&
-           adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
-               mutex_lock(&adev->pm.mutex);
-               if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
-                       adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
-               else
-                       seq_printf(m, "Debugfs support not implemented for this asic\n");
-               mutex_unlock(&adev->pm.mutex);
-               r = 0;
-       } else {
+       if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
                r = amdgpu_debugfs_pm_info_pp(m, adev);
+               if (r)
+                       goto out;
        }
-       if (r)
-               goto out;
 
        amdgpu_device_ip_get_clockgating_state(adev, &flags);
 
@@ -3730,21 +3564,18 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
                                         size_t size, loff_t *pos)
 {
        struct amdgpu_device *adev = file_inode(f)->i_private;
-       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-       void *pp_handle = adev->powerplay.pp_handle;
        size_t smu_prv_buf_size;
        void *smu_prv_buf;
+       int ret = 0;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;
 
-       if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
-               pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
-                                                 &smu_prv_buf_size);
-       else
-               return -ENOSYS;
+       ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
+       if (ret)
+               return ret;
 
        if (!smu_prv_buf || !smu_prv_buf_size)
                return -EINVAL;
@@ -3768,6 +3599,9 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
 
+       if (!adev->pm.dpm_enabled)
+               return;
+
        debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
                            &amdgpu_debugfs_pm_info_fops);
 
@@ -3777,6 +3611,6 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
                                         &amdgpu_debugfs_pm_prv_buffer_fops,
                                         adev->pm.smu_prv_buffer_size);
 
-       amdgpu_smu_stb_debug_fs_init(adev);
+       amdgpu_dpm_stb_debug_fs_init(adev);
 #endif
 }
index c464a045000dd5ea660bfcea1584f5f5443c2831..43d6b57173a361657f03de5252dc8ba42128d5a3 100644 (file)
 #ifndef __AMDGPU_DPM_H__
 #define __AMDGPU_DPM_H__
 
+/* Argument for PPSMC_MSG_GpuChangeState */
+enum gfx_change_state {
+       sGpuChangeState_D0Entry = 1,
+       sGpuChangeState_D3Entry,
+};
+
 enum amdgpu_int_thermal_type {
        THERMAL_TYPE_NONE,
        THERMAL_TYPE_EXTERNAL,
@@ -39,19 +45,6 @@ enum amdgpu_int_thermal_type {
        THERMAL_TYPE_KV,
 };
 
-enum amdgpu_dpm_auto_throttle_src {
-       AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
-       AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
-       AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
-       AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
-       AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
-       AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-       AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
 struct amdgpu_ps {
        u32 caps; /* vbios flags */
        u32 class; /* vbios flags */
@@ -95,19 +88,6 @@ struct amdgpu_dpm_thermal {
        struct amdgpu_irq_src   irq;
 };
 
-enum amdgpu_clk_action
-{
-       AMDGPU_SCLK_UP = 1,
-       AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
-       u32 sclk;
-       u32 mclk;
-       enum amdgpu_clk_action action;
-};
-
 struct amdgpu_clock_and_voltage_limits {
        u32 sclk;
        u32 mclk;
@@ -246,128 +226,6 @@ struct amdgpu_dpm_fan {
        bool ucode_fan_control;
 };
 
-enum amdgpu_pcie_gen {
-       AMDGPU_PCIE_GEN1 = 0,
-       AMDGPU_PCIE_GEN2 = 1,
-       AMDGPU_PCIE_GEN3 = 2,
-       AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) \
-               ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_power_state(adev) \
-               ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_post_set_power_state(adev) \
-               ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_display_configuration_changed(adev) \
-               ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_print_power_state(adev, ps) \
-               ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
-
-#define amdgpu_dpm_vblank_too_short(adev) \
-               ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_enable_bapm(adev, e) \
-               ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
-               ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
-               ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
-               ((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
-               ((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
-               ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
-               ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
-               ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
-               ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
-               ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
-               ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
-               ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
-               ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
-               ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
-
-#define amdgpu_dpm_get_sclk_od(adev) \
-               ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
-               ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_get_mclk_od(adev) \
-               ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
-               ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, task_id, user_state)            \
-               ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
-
-#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
-               ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-
-#define amdgpu_dpm_get_vce_clock_state(adev, i)                                \
-               ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-
-#define amdgpu_dpm_get_performance_level(adev)                         \
-               ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_reset_power_profile_state(adev, request) \
-               ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
-                       (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
-               ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
-                       (adev)->powerplay.pp_handle, buf))
-
-#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
-               ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
-                       (adev)->powerplay.pp_handle, parameter, size))
-
-#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
-               ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
-                       (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
-               ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
-                       (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
-               ((adev)->powerplay.pp_funcs->get_ppfeature_status(\
-                       (adev)->powerplay.pp_handle, (buf)))
-
-#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
-               ((adev)->powerplay.pp_funcs->set_ppfeature_status(\
-                       (adev)->powerplay.pp_handle, (ppfeatures)))
-
-#define amdgpu_dpm_get_gpu_metrics(adev, table) \
-               ((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
-
 struct amdgpu_dpm {
        struct amdgpu_ps        *ps;
        /* number of valid power states */
@@ -426,6 +284,15 @@ enum ip_power_state {
 /* Used to mask smu debug modes */
 #define SMU_DEBUG_HALT_ON_ERROR                0x1
 
+#define MAX_SMU_I2C_BUSES       2
+
+struct amdgpu_smu_i2c_bus {
+       struct i2c_adapter adapter;
+       struct amdgpu_device *adev;
+       int port;
+       struct mutex mutex;
+};
+
 struct amdgpu_pm {
        struct mutex            mutex;
        u32                     current_sclk;
@@ -458,8 +325,9 @@ struct amdgpu_pm {
        uint32_t pp_feature;
 
        /* Used for I2C access to various EEPROMs on relevant ASICs */
-       struct i2c_adapter smu_i2c;
-       struct mutex            smu_i2c_mutex;
+       struct amdgpu_smu_i2c_bus smu_i2c[MAX_SMU_I2C_BUSES];
+       struct i2c_adapter     *ras_eeprom_i2c_bus;
+       struct i2c_adapter     *fru_eeprom_i2c_bus;
        struct list_head        pm_attr_list;
 
        atomic_t                pwr_state[AMD_IP_BLOCK_TYPE_NUM];
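
The single smu_i2c adapter becomes an array of amdgpu_smu_i2c_bus instances, and consumers (the RAS and FRU EEPROM code) are handed a specific struct i2c_adapter pointer instead of assuming bus 0. Because the adapter is embedded in the bus struct, an adapter callback can recover its owning bus with container_of(); a minimal sketch with a hypothetical transfer function:

static int smu_i2c_xfer_example(struct i2c_adapter *adap,
                                struct i2c_msg *msgs, int num)
{
        struct amdgpu_smu_i2c_bus *bus =
                container_of(adap, struct amdgpu_smu_i2c_bus, adapter);
        int ret = 0;

        mutex_lock(&bus->mutex);        /* per-bus, not one global lock */
        /* ... issue the transaction via bus->adev on bus->port ... */
        mutex_unlock(&bus->mutex);

        return ret ?: num;
}
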
@@ -468,64 +336,16 @@ struct amdgpu_pm {
         * 0 = disabled (default), otherwise enable corresponding debug mode
         */
        uint32_t                smu_debug_mask;
-};
 
-#define R600_SSTU_DFLT                               0
-#define R600_SST_DFLT                                0x00C8
+       bool                    pp_force_state_enabled;
 
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC  1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum amdgpu_td {
-       AMDGPU_TD_AUTO,
-       AMDGPU_TD_UP,
-       AMDGPU_TD_DOWN,
+       struct mutex            stable_pstate_ctx_lock;
+       struct amdgpu_ctx       *stable_pstate_ctx;
 };
 
-enum amdgpu_display_watermark {
-       AMDGPU_DISPLAY_WATERMARK_LOW = 0,
-       AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum amdgpu_display_gap
-{
-    AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
-    AMDGPU_PM_DISPLAY_GAP_VBLANK       = 1,
-    AMDGPU_PM_DISPLAY_GAP_WATERMARK    = 2,
-    AMDGPU_PM_DISPLAY_GAP_IGNORE       = 3,
-};
-
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
-                               struct amdgpu_ps *rps);
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size);
 
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev);
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
-                                                u32 sys_mask,
-                                                enum amdgpu_pcie_gen asic_gen,
-                                                enum amdgpu_pcie_gen default_gen);
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx);
-
 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
                                      uint32_t block_type, bool gate);
 
@@ -571,16 +391,139 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
 
 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
 
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
-                          void *data, uint32_t *size);
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
+void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
-
+int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
+int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
+                                      enum pp_clock_type type,
+                                      uint32_t *min,
+                                      uint32_t *max);
+int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
+                                       enum pp_clock_type type,
+                                       uint32_t min,
+                                       uint32_t max);
+int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev);
+int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+                      uint64_t event_arg);
+int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev);
+void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
+                                enum gfx_change_state state);
+int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
+                           void *umc_ecc);
+struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+                                                    uint32_t idx);
+void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
+void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
+                               enum amd_pm_state_type state);
+enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
+int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
+                                      enum amd_dpm_forced_level level);
+int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
+                                struct pp_states_info *states);
+int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
+                             enum amd_pp_task task_id,
+                             enum amd_pm_state_type *user_state);
+int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table);
+int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
+                                     uint32_t type,
+                                     long *input,
+                                     uint32_t size);
+int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
+                                 uint32_t type,
+                                 long *input,
+                                 uint32_t size);
+int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
+                                 enum pp_clock_type type,
+                                 char *buf);
+int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
+                                 enum pp_clock_type type,
+                                 char *buf,
+                                 int *offset);
+int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
+                                   uint64_t ppfeature_masks);
+int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
+int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
+                                enum pp_clock_type type,
+                                uint32_t mask);
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
+int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
+                                     char *buf);
+int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
+                                     long *input, uint32_t size);
+int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
+                                   uint32_t *fan_mode);
+int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
+                                uint32_t speed);
+int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
+                                uint32_t *speed);
+int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
+                                uint32_t *speed);
+int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
+                                uint32_t speed);
+int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
+                                   uint32_t mode);
+int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
+                              uint32_t *limit,
+                              enum pp_power_limit_level pp_limit_level,
+                              enum pp_power_type power_type);
+int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+                              uint32_t limit);
+int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+                                                      struct seq_file *m);
+int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+                                      void **addr,
+                                      size_t *size);
+int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+                           const char *buf,
+                           size_t size);
+int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev);
+void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev);
+int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
+                                           const struct amd_pp_display_configuration *input);
+int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
+                                enum amd_pp_clock_type type,
+                                struct amd_pp_clocks *clocks);
+int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
+                                               struct amd_pp_simple_clock_info *clocks);
+int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
+                                             enum amd_pp_clock_type type,
+                                             struct pp_clock_levels_with_latency *clocks);
+int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
+                                             enum amd_pp_clock_type type,
+                                             struct pp_clock_levels_with_voltage *clocks);
+int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
+                                              void *clock_ranges);
+int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
+                                            struct pp_display_clock_request *clock);
+int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
+                                 struct amd_pp_clock_info *clocks);
+void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev);
+int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
+                                       uint32_t count);
+int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
+                                         uint32_t clock);
+void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
+                                            uint32_t clock);
+void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
+                                         uint32_t clock);
+int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
+                                                  bool disable_memory_clock_switch);
+int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
+                                               struct pp_smu_nv_clock_table *max_clocks);
+enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
+                                                 unsigned int *clock_values_in_khz,
+                                                 unsigned int *num_states);
+int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
+                                  struct dpm_clocks *clock_table);
 #endif
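
With the macro layer deleted, every amdgpu_dpm_* entry point above is a real function with a uniform int (or pointer) return, so callers no longer dereference adev->powerplay.pp_funcs at all. A short sketch of a caller against the new surface, using the out-parameter style of the fan API:

        uint32_t mode, pwm;
        int err;

        err = amdgpu_dpm_get_fan_control_mode(adev, &mode);
        if (err)        /* e.g. -EOPNOTSUPP from a backend without fan control */
                return err;

        if (mode == AMD_FAN_CTRL_MANUAL) {
                err = amdgpu_dpm_get_fan_speed_pwm(adev, &pwm);
                if (!err)
                        dev_info(adev->dev, "fan pwm: %u\n", pwm);
        }
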
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
new file mode 100644 (file)
index 0000000..5c2a89f
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DPM_INTERNAL_H__
+#define __AMDGPU_DPM_INTERNAL_H__
+
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
new file mode 100644 (file)
index 0000000..baa4265
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright 2021 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+AMD_LEGACYDPM_PATH = ../pm/legacy-dpm
+
+LEGACYDPM_MGR-y = legacy_dpm.o
+
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
+
+AMD_LEGACYDPM_POWER = $(addprefix $(AMD_LEGACYDPM_PATH)/,$(LEGACYDPM_MGR-y))
+
+AMD_POWERPLAY_FILES += $(AMD_LEGACYDPM_POWER)
similarity index 99%
rename from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
rename to drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index bcae42cef37434de0c1840c85d0916c873c77788..8b23cc9f098adcae559e47a3ace3cb9e40ab2d91 100644 (file)
@@ -36,6 +36,7 @@
 
 #include "gca/gfx_7_2_d.h"
 #include "gca/gfx_7_2_sh_mask.h"
+#include "legacy_dpm.h"
 
 #define KV_MAX_DEEPSLEEP_DIVIDER_ID     5
 #define KV_MINIMUM_ENGINE_CLOCK         800
@@ -1256,6 +1257,19 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
        }
 }
 
+static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
+{
+       switch (sensor) {
+       case THERMAL_TYPE_KV:
+               return true;
+       case THERMAL_TYPE_NONE:
+       case THERMAL_TYPE_EXTERNAL:
+       case THERMAL_TYPE_EXTERNAL_GPIO:
+       default:
+               return false;
+       }
+}
+
 static int kv_dpm_enable(struct amdgpu_device *adev)
 {
        struct kv_power_info *pi = kv_get_pi(adev);
@@ -1352,7 +1366,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
        }
 
        if (adev->irq.installed &&
-           amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+           kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
                ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
                if (ret) {
                        DRM_ERROR("kv_set_thermal_temperature_range failed\n");
@@ -3016,21 +3030,18 @@ static int kv_dpm_sw_init(void *handle)
                return 0;
 
        INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
-       mutex_lock(&adev->pm.mutex);
        ret = kv_dpm_init(adev);
        if (ret)
                goto dpm_failed;
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
        return 0;
 
 dpm_failed:
        kv_dpm_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
        DRM_ERROR("amdgpu: dpm initialization failed\n");
        return ret;
 }
@@ -3041,9 +3052,7 @@ static int kv_dpm_sw_fini(void *handle)
 
        flush_work(&adev->pm.dpm.thermal.work);
 
-       mutex_lock(&adev->pm.mutex);
        kv_dpm_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
 
        return 0;
 }
@@ -3056,15 +3065,13 @@ static int kv_dpm_hw_init(void *handle)
        if (!amdgpu_dpm)
                return 0;
 
-       mutex_lock(&adev->pm.mutex);
        kv_dpm_setup_asic(adev);
        ret = kv_dpm_enable(adev);
        if (ret)
                adev->pm.dpm_enabled = false;
        else
                adev->pm.dpm_enabled = true;
-       mutex_unlock(&adev->pm.mutex);
-       amdgpu_pm_compute_clocks(adev);
+       amdgpu_legacy_dpm_compute_clocks(adev);
        return ret;
 }
 
@@ -3072,11 +3079,8 @@ static int kv_dpm_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->pm.dpm_enabled) {
-               mutex_lock(&adev->pm.mutex);
+       if (adev->pm.dpm_enabled)
                kv_dpm_disable(adev);
-               mutex_unlock(&adev->pm.mutex);
-       }
 
        return 0;
 }
@@ -3086,12 +3090,10 @@ static int kv_dpm_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->pm.dpm_enabled) {
-               mutex_lock(&adev->pm.mutex);
                /* disable dpm */
                kv_dpm_disable(adev);
                /* reset the power state */
                adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
-               mutex_unlock(&adev->pm.mutex);
        }
        return 0;
 }
@@ -3103,16 +3105,14 @@ static int kv_dpm_resume(void *handle)
 
        if (adev->pm.dpm_enabled) {
                /* asic init will reset to the boot state */
-               mutex_lock(&adev->pm.mutex);
                kv_dpm_setup_asic(adev);
                ret = kv_dpm_enable(adev);
                if (ret)
                        adev->pm.dpm_enabled = false;
                else
                        adev->pm.dpm_enabled = true;
-               mutex_unlock(&adev->pm.mutex);
                if (adev->pm.dpm_enabled)
-                       amdgpu_pm_compute_clocks(adev);
+                       amdgpu_legacy_dpm_compute_clocks(adev);
        }
        return 0;
 }
@@ -3366,6 +3366,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
        .get_vce_clock_state = amdgpu_get_vce_clock_state,
        .check_state_equal = kv_check_state_equal,
        .read_sensor = &kv_dpm_read_sensor,
+       .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
 };
 
 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
new file mode 100644
index 0000000..9613c61
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -0,0 +1,1081 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "amdgpu_atombios.h"
+#include "atom.h"
+#include "amd_pcie.h"
+#include "legacy_dpm.h"
+#include "amdgpu_dpm_internal.h"
+#include "amdgpu_display.h"
+
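+/* Thin wrappers around the amd_pm_funcs callbacks used by the legacy
+ * power-state machinery below; most call sites check the corresponding
+ * pp_funcs hook for NULL before invoking the wrapper.
+ */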
+#define amdgpu_dpm_pre_set_power_state(adev) \
+               ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+               ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+               ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_print_power_state(adev, ps) \
+               ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+               ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+               ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+{
+       const char *s;
+
+       switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+       case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+       default:
+               s = "none";
+               break;
+       case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+               s = "battery";
+               break;
+       case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+               s = "balanced";
+               break;
+       case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+               s = "performance";
+               break;
+       }
+       printk("\tui class: %s\n", s);
+       printk("\tinternal class:");
+       if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
+           (class2 == 0)) {
+               pr_cont(" none");
+       } else {
+               if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+                       pr_cont(" boot");
+               if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+                       pr_cont(" thermal");
+               if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+                       pr_cont(" limited_pwr");
+               if (class & ATOM_PPLIB_CLASSIFICATION_REST)
+                       pr_cont(" rest");
+               if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
+                       pr_cont(" forced");
+               if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+                       pr_cont(" 3d_perf");
+               if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
+                       pr_cont(" ovrdrv");
+               if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+                       pr_cont(" uvd");
+               if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
+                       pr_cont(" 3d_low");
+               if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+                       pr_cont(" acpi");
+               if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+                       pr_cont(" uvd_hd2");
+               if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+                       pr_cont(" uvd_hd");
+               if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+                       pr_cont(" uvd_sd");
+               if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+                       pr_cont(" limited_pwr2");
+               if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+                       pr_cont(" ulv");
+               if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+                       pr_cont(" uvd_mvc");
+       }
+       pr_cont("\n");
+}
+
+void amdgpu_dpm_print_cap_info(u32 caps)
+{
+       printk("\tcaps:");
+       if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+               pr_cont(" single_disp");
+       if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
+               pr_cont(" video");
+       if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
+               pr_cont(" no_dc");
+       pr_cont("\n");
+}
+
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+                               struct amdgpu_ps *rps)
+{
+       printk("\tstatus:");
+       if (rps == adev->pm.dpm.current_ps)
+               pr_cont(" c");
+       if (rps == adev->pm.dpm.requested_ps)
+               pr_cont(" r");
+       if (rps == adev->pm.dpm.boot_ps)
+               pr_cont(" b");
+       pr_cont("\n");
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+       int i;
+
+       if (adev->powerplay.pp_funcs->print_power_state == NULL)
+               return;
+
+       for (i = 0; i < adev->pm.dpm.num_ps; i++)
+               amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+}
+
+union power_info {
+       struct _ATOM_POWERPLAY_INFO info;
+       struct _ATOM_POWERPLAY_INFO_V2 info_2;
+       struct _ATOM_POWERPLAY_INFO_V3 info_3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+       struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+       struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+       struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+       struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+int amdgpu_get_platform_caps(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       union power_info *power_info;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       u16 data_offset;
+       u8 frev, crev;
+
+       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset))
+               return -EINVAL;
+       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+       adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+       adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+       adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+       return 0;
+}
+
+union fan_info {
+       struct _ATOM_PPLIB_FANTABLE fan;
+       struct _ATOM_PPLIB_FANTABLE2 fan2;
+       struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
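+/* Copy a vbios clock/voltage dependency table into the driver's native
+ * layout; the entries are allocated here and released later by
+ * amdgpu_free_extended_power_table().
+ */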
+static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
+                                             ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
+{
+       u32 size = atom_table->ucNumEntries *
+               sizeof(struct amdgpu_clock_voltage_dependency_entry);
+       int i;
+       ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
+
+       amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
+       if (!amdgpu_table->entries)
+               return -ENOMEM;
+
+       entry = &atom_table->entries[0];
+       for (i = 0; i < atom_table->ucNumEntries; i++) {
+               amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+                       (entry->ucClockHigh << 16);
+               amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+               entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+                       ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
+       }
+       amdgpu_table->count = atom_table->ucNumEntries;
+
+       return 0;
+}
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
+
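+/* Pull the optional pplib tables out of the vbios powerplay table into
+ * adev->pm.dpm: fan control parameters, clock/voltage dependency and phase
+ * shedding tables, CAC data, and the extended-header tables (VCE, UVD,
+ * SAMU, PPM, ACP, powertune, vddgfx).
+ */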
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       union power_info *power_info;
+       union fan_info *fan_info;
+       ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       u16 data_offset;
+       u8 frev, crev;
+       int ret, i;
+
+       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset))
+               return -EINVAL;
+       power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+       /* fan table */
+       if (le16_to_cpu(power_info->pplib.usTableSize) >=
+           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+               if (power_info->pplib3.usFanTableOffset) {
+                       fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
+                                                     le16_to_cpu(power_info->pplib3.usFanTableOffset));
+                       adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
+                       adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
+                       adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
+                       adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
+                       adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
+                       adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
+                       adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
+                       if (fan_info->fan.ucFanTableFormat >= 2)
+                               adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
+                       else
+                               adev->pm.dpm.fan.t_max = 10900;
+                       adev->pm.dpm.fan.cycle_delay = 100000;
+                       if (fan_info->fan.ucFanTableFormat >= 3) {
+                               adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+                               adev->pm.dpm.fan.default_max_fan_pwm =
+                                       le16_to_cpu(fan_info->fan3.usFanPWMMax);
+                               adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+                               adev->pm.dpm.fan.fan_output_sensitivity =
+                                       le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+                       }
+                       adev->pm.dpm.fan.ucode_fan_control = true;
+               }
+       }
+
+       /* clock dependency tables, phase shedding tables */
+       if (le16_to_cpu(power_info->pplib.usTableSize) >=
+           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
+               if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
+                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+                                                                dep_table);
+                       if (ret) {
+                               amdgpu_free_extended_power_table(adev);
+                               return ret;
+                       }
+               }
+               if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+                                                                dep_table);
+                       if (ret) {
+                               amdgpu_free_extended_power_table(adev);
+                               return ret;
+                       }
+               }
+               if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+                                                                dep_table);
+                       if (ret) {
+                               amdgpu_free_extended_power_table(adev);
+                               return ret;
+                       }
+               }
+               if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+                       ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+                                                                dep_table);
+                       if (ret) {
+                               amdgpu_free_extended_power_table(adev);
+                               return ret;
+                       }
+               }
+               if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+                       ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+                               (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
+                       if (clk_v->ucNumEntries) {
+                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
+                                       le16_to_cpu(clk_v->entries[0].usSclkLow) |
+                                       (clk_v->entries[0].ucSclkHigh << 16);
+                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
+                                       le16_to_cpu(clk_v->entries[0].usMclkLow) |
+                                       (clk_v->entries[0].ucMclkHigh << 16);
+                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
+                                       le16_to_cpu(clk_v->entries[0].usVddc);
+                               adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
+                                       le16_to_cpu(clk_v->entries[0].usVddci);
+                       }
+               }
+               if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
+                       ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
+                               (ATOM_PPLIB_PhaseSheddingLimits_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+                       ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
+
+                       adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
+                               kcalloc(psl->ucNumEntries,
+                                       sizeof(struct amdgpu_phase_shedding_limits_entry),
+                                       GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+
+                       entry = &psl->entries[0];
+                       for (i = 0; i < psl->ucNumEntries; i++) {
+                               adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
+                                       le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
+                               adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
+                                       le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
+                               adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
+                                       le16_to_cpu(entry->usVoltage);
+                               entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+                                       ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
+                       }
+                       adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
+                               psl->ucNumEntries;
+               }
+       }
+
+       /* cac data */
+       if (le16_to_cpu(power_info->pplib.usTableSize) >=
+           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
+               adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
+               adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
+               adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
+               adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
+               if (adev->pm.dpm.tdp_od_limit)
+                       adev->pm.dpm.power_control = true;
+               else
+                       adev->pm.dpm.power_control = false;
+               adev->pm.dpm.tdp_adjustment = 0;
+               adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
+               adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
+               adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
+               if (power_info->pplib5.usCACLeakageTableOffset) {
+                       ATOM_PPLIB_CAC_Leakage_Table *cac_table =
+                               (ATOM_PPLIB_CAC_Leakage_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+                       ATOM_PPLIB_CAC_Leakage_Record *entry;
+                       u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
+                       adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       entry = &cac_table->entries[0];
+                       for (i = 0; i < cac_table->ucNumEntries; i++) {
+                               if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+                                               le16_to_cpu(entry->usVddc1);
+                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+                                               le16_to_cpu(entry->usVddc2);
+                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+                                               le16_to_cpu(entry->usVddc3);
+                               } else {
+                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+                                               le16_to_cpu(entry->usVddc);
+                                       adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+                                               le32_to_cpu(entry->ulLeakageValue);
+                               }
+                               entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+                                       ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
+                       }
+                       adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
+               }
+       }
+
+       /* ext tables */
+       if (le16_to_cpu(power_info->pplib.usTableSize) >=
+           sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+               ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
+                       (mode_info->atom_context->bios + data_offset +
+                        le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+                       ext_hdr->usVCETableOffset) {
+                       VCEClockInfoArray *array = (VCEClockInfoArray *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+                       ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+                               (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+                                1 + array->ucNumEntries * sizeof(VCEClockInfo));
+                       ATOM_PPLIB_VCE_State_Table *states =
+                               (ATOM_PPLIB_VCE_State_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+                                1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
+                                1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
+                       ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+                       ATOM_PPLIB_VCE_State_Record *state_entry;
+                       VCEClockInfo *vce_clk;
+                       u32 size = limits->numEntries *
+                               sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
+                       adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+                               kzalloc(size, GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+                               limits->numEntries;
+                       entry = &limits->entries[0];
+                       state_entry = &states->entries[0];
+                       for (i = 0; i < limits->numEntries; i++) {
+                               vce_clk = (VCEClockInfo *)
+                                       ((u8 *)&array->entries[0] +
+                                        (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+                               adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+                                       le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+                               adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+                                       le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+                               adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+                                       le16_to_cpu(entry->usVoltage);
+                               entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+                                       ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+                       }
+                       adev->pm.dpm.num_of_vce_states =
+                                       states->numEntries > AMD_MAX_VCE_LEVELS ?
+                                       AMD_MAX_VCE_LEVELS : states->numEntries;
+                       for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
+                               vce_clk = (VCEClockInfo *)
+                                       ((u8 *)&array->entries[0] +
+                                        (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+                               adev->pm.dpm.vce_states[i].evclk =
+                                       le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+                               adev->pm.dpm.vce_states[i].ecclk =
+                                       le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+                               adev->pm.dpm.vce_states[i].clk_idx =
+                                       state_entry->ucClockInfoIndex & 0x3f;
+                               adev->pm.dpm.vce_states[i].pstate =
+                                       (state_entry->ucClockInfoIndex & 0xc0) >> 6;
+                               state_entry = (ATOM_PPLIB_VCE_State_Record *)
+                                       ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+                       }
+               }
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+                       ext_hdr->usUVDTableOffset) {
+                       UVDClockInfoArray *array = (UVDClockInfoArray *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+                       ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+                               (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+                                1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
+                       ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+                       u32 size = limits->numEntries *
+                               sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
+                       adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+                               kzalloc(size, GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+                               limits->numEntries;
+                       entry = &limits->entries[0];
+                       for (i = 0; i < limits->numEntries; i++) {
+                               UVDClockInfo *uvd_clk = (UVDClockInfo *)
+                                       ((u8 *)&array->entries[0] +
+                                        (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+                               adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+                                       le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+                               adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+                                       le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+                               adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+                                       le16_to_cpu(entry->usVoltage);
+                               entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+                                       ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+                       }
+               }
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+                       ext_hdr->usSAMUTableOffset) {
+                       ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+                               (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+                       ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+                       u32 size = limits->numEntries *
+                               sizeof(struct amdgpu_clock_voltage_dependency_entry);
+                       adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+                               kzalloc(size, GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+                               limits->numEntries;
+                       entry = &limits->entries[0];
+                       for (i = 0; i < limits->numEntries; i++) {
+                               adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+                                       le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+                               adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+                                       le16_to_cpu(entry->usVoltage);
+                               entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+                                       ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+                       }
+               }
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
+                   ext_hdr->usPPMTableOffset) {
+                       ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usPPMTableOffset));
+                       adev->pm.dpm.dyn_state.ppm_table =
+                               kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.ppm_table) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+                       adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+                               le16_to_cpu(ppm->usCpuCoreNumber);
+                       adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
+                               le32_to_cpu(ppm->ulPlatformTDP);
+                       adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
+                               le32_to_cpu(ppm->ulSmallACPlatformTDP);
+                       adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
+                               le32_to_cpu(ppm->ulPlatformTDC);
+                       adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
+                               le32_to_cpu(ppm->ulSmallACPlatformTDC);
+                       adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
+                               le32_to_cpu(ppm->ulApuTDP);
+                       adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
+                               le32_to_cpu(ppm->ulDGpuTDP);
+                       adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
+                               le32_to_cpu(ppm->ulDGpuUlvPower);
+                       adev->pm.dpm.dyn_state.ppm_table->tj_max =
+                               le32_to_cpu(ppm->ulTjmax);
+               }
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+                       ext_hdr->usACPTableOffset) {
+                       ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+                               (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+                       ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+                       u32 size = limits->numEntries *
+                               sizeof(struct amdgpu_clock_voltage_dependency_entry);
+                       adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+                               kzalloc(size, GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+                               limits->numEntries;
+                       entry = &limits->entries[0];
+                       for (i = 0; i < limits->numEntries; i++) {
+                               adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+                                       le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+                               adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+                                       le16_to_cpu(entry->usVoltage);
+                               entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+                                       ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+                       }
+               }
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+                       ext_hdr->usPowerTuneTableOffset) {
+                       u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+                                        le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+                       ATOM_PowerTune_Table *pt;
+                       adev->pm.dpm.dyn_state.cac_tdp_table =
+                               kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
+                       if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
+                               amdgpu_free_extended_power_table(adev);
+                               return -ENOMEM;
+                       }
+                       if (rev > 0) {
+                               ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+                                       (mode_info->atom_context->bios + data_offset +
+                                        le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+                               adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+                                       ppt->usMaximumPowerDeliveryLimit;
+                               pt = &ppt->power_tune_table;
+                       } else {
+                               ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+                                       (mode_info->atom_context->bios + data_offset +
+                                        le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+                               adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+                               pt = &ppt->power_tune_table;
+                       }
+                       adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+                       adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+                               le16_to_cpu(pt->usConfigurableTDP);
+                       adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+                       adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+                               le16_to_cpu(pt->usBatteryPowerLimit);
+                       adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+                               le16_to_cpu(pt->usSmallPowerLimit);
+                       adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+                               le16_to_cpu(pt->usLowCACLeakage);
+                       adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+                               le16_to_cpu(pt->usHighCACLeakage);
+               }
+               if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
+                               ext_hdr->usSclkVddgfxTableOffset) {
+                       dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+                               (mode_info->atom_context->bios + data_offset +
+                                le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
+                       ret = amdgpu_parse_clk_voltage_dep_table(
+                                       &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
+                                       dep_table);
+                       if (ret) {
+                               kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+                               return ret;
+                       }
+               }
+       }
+
+       return 0;
+}
+
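+/* Release every dyn_state table allocated while parsing the extended power
+ * table; kfree(NULL) is a no-op, so this is safe on partially parsed state.
+ */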
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
+{
+       struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
+
+       kfree(dyn_state->vddc_dependency_on_sclk.entries);
+       kfree(dyn_state->vddci_dependency_on_mclk.entries);
+       kfree(dyn_state->vddc_dependency_on_mclk.entries);
+       kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+       kfree(dyn_state->cac_leakage_table.entries);
+       kfree(dyn_state->phase_shedding_limits_table.entries);
+       kfree(dyn_state->ppm_table);
+       kfree(dyn_state->cac_tdp_table);
+       kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+       kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+       kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+       kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
+       kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
+}
+
+static const char *pp_lib_thermal_controller_names[] = {
+       "NONE",
+       "lm63",
+       "adm1032",
+       "adm1030",
+       "max6649",
+       "lm64",
+       "f75375",
+       "RV6xx",
+       "RV770",
+       "adt7473",
+       "NONE",
+       "External GPIO",
+       "Evergreen",
+       "emc2103",
+       "Sumo",
+       "Northern Islands",
+       "Southern Islands",
+       "lm96163",
+       "Sea Islands",
+       "Kaveri/Kabini",
+};
+
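+/* Parse the thermal controller entry of the powerplay table.  Internal
+ * sensors only record the thermal type; for a known external chip the i2c
+ * bus is looked up from the vbios and an i2c device is registered for it.
+ */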
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       ATOM_PPLIB_POWERPLAYTABLE *power_table;
+       int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+       ATOM_PPLIB_THERMALCONTROLLER *controller;
+       struct amdgpu_i2c_bus_rec i2c_bus;
+       u16 data_offset;
+       u8 frev, crev;
+
+       if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+                                  &frev, &crev, &data_offset))
+               return;
+       power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
+               (mode_info->atom_context->bios + data_offset);
+       controller = &power_table->sThermalController;
+
+       /* add the i2c bus for thermal/fan chip */
+       if (controller->ucType > 0) {
+               if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+                       adev->pm.no_fan = true;
+               adev->pm.fan_pulses_per_revolution =
+                       controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+               if (adev->pm.fan_pulses_per_revolution) {
+                       adev->pm.fan_min_rpm = controller->ucFanMinRPM;
+                       adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+               }
+               if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_NI;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_SI;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_CI;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+                       DRM_INFO("Internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_KV;
+               } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+                       DRM_INFO("External GPIO thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+                       DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+               } else if (controller->ucType ==
+                          ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+                       DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+               } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+                       DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+                                pp_lib_thermal_controller_names[controller->ucType],
+                                controller->ucI2cAddress >> 1,
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+                       adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
+                       i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
+                       adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
+                       if (adev->pm.i2c_bus) {
+                               struct i2c_board_info info = { };
+                               const char *name = pp_lib_thermal_controller_names[controller->ucType];
+                               info.addr = controller->ucI2cAddress >> 1;
+                               strlcpy(info.type, name, sizeof(info.type));
+                               i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
+                       }
+               } else {
+                       DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+                                controller->ucType,
+                                controller->ucI2cAddress >> 1,
+                                (controller->ucFanParameters &
+                                 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+               }
+       }
+}
+
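+/* Exposed through amd_pm_funcs.get_vce_clock_state (see kv_dpm_funcs);
+ * returns NULL when idx is out of range.
+ */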
+struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (idx < adev->pm.dpm.num_of_vce_states)
+               return &adev->pm.dpm.vce_states[idx];
+
+       return NULL;
+}
+
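+/* Pick the best matching power state for the requested dpm_state: honour
+ * single-display-only states (unless the vblank period is too short for
+ * mclk switching) and fall back through progressively more generic state
+ * types until one matches.
+ */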
+static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
+                                                    enum amd_pm_state_type dpm_state)
+{
+       int i;
+       struct amdgpu_ps *ps;
+       u32 ui_class;
+       bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
+
+       /* check if the vblank period is too short to adjust the mclk */
+       if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+               if (amdgpu_dpm_vblank_too_short(adev))
+                       single_display = false;
+       }
+
+       /* certain older asics have a separate 3D performance state,
+        * so try that first if the user selected performance
+        */
+       if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+               dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+       /* balanced states don't exist at the moment */
+       if (dpm_state == POWER_STATE_TYPE_BALANCED)
+               dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+       /* Pick the best power state based on current conditions */
+       for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+               ps = &adev->pm.dpm.ps[i];
+               ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+               switch (dpm_state) {
+               /* user states */
+               case POWER_STATE_TYPE_BATTERY:
+                       if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+                               if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+                                       if (single_display)
+                                               return ps;
+                               } else {
+                                       return ps;
+                               }
+                       }
+                       break;
+               case POWER_STATE_TYPE_BALANCED:
+                       if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+                               if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+                                       if (single_display)
+                                               return ps;
+                               } else {
+                                       return ps;
+                               }
+                       }
+                       break;
+               case POWER_STATE_TYPE_PERFORMANCE:
+                       if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+                               if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+                                       if (single_display)
+                                               return ps;
+                               } else {
+                                       return ps;
+                               }
+                       }
+                       break;
+               /* internal states */
+               case POWER_STATE_TYPE_INTERNAL_UVD:
+                       if (adev->pm.dpm.uvd_ps)
+                               return adev->pm.dpm.uvd_ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+                       if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_BOOT:
+                       return adev->pm.dpm.boot_ps;
+               case POWER_STATE_TYPE_INTERNAL_THERMAL:
+                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_ACPI:
+                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_ULV:
+                       if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+                               return ps;
+                       break;
+               case POWER_STATE_TYPE_INTERNAL_3DPERF:
+                       if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+                               return ps;
+                       break;
+               default:
+                       break;
+               }
+       }
+       /* use a fallback state if we didn't match */
+       switch (dpm_state) {
+       case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+               dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+               goto restart_search;
+       case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+       case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+       case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+               if (adev->pm.dpm.uvd_ps) {
+                       return adev->pm.dpm.uvd_ps;
+               } else {
+                       dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+                       goto restart_search;
+               }
+       case POWER_STATE_TYPE_INTERNAL_THERMAL:
+               dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+               goto restart_search;
+       case POWER_STATE_TYPE_INTERNAL_ACPI:
+               dpm_state = POWER_STATE_TYPE_BATTERY;
+               goto restart_search;
+       case POWER_STATE_TYPE_BATTERY:
+       case POWER_STATE_TYPE_BALANCED:
+       case POWER_STATE_TYPE_INTERNAL_3DPERF:
+               dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+               goto restart_search;
+       default:
+               break;
+       }
+
+       return NULL;
+}
+
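+/*
+ * Select and program the next power state: pick a state matching
+ * adev->pm.dpm.state, skip the hardware transition when the requested
+ * state compares equal to the current one, then reapply the forced
+ * performance level (LOW while a thermal event is active, otherwise the
+ * user's selection).
+ */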
+static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       struct amdgpu_ps *ps;
+       enum amd_pm_state_type dpm_state;
+       int ret;
+       bool equal = false;
+
+       /* if dpm init failed */
+       if (!adev->pm.dpm_enabled)
+               return 0;
+
+       if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+               /* add other state override checks here */
+               if ((!adev->pm.dpm.thermal_active) &&
+                   (!adev->pm.dpm.uvd_active))
+                       adev->pm.dpm.state = adev->pm.dpm.user_state;
+       }
+       dpm_state = adev->pm.dpm.state;
+
+       ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+       if (ps)
+               adev->pm.dpm.requested_ps = ps;
+       else
+               return -EINVAL;
+
+       if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
+               printk(KERN_INFO "switching from power state:\n");
+               amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+               printk(KERN_INFO "switching to power state:\n");
+               amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+       }
+
+       /* update whether vce is active */
+       ps->vce_active = adev->pm.dpm.vce_active;
+       if (pp_funcs->display_configuration_changed)
+               amdgpu_dpm_display_configuration_changed(adev);
+
+       ret = amdgpu_dpm_pre_set_power_state(adev);
+       if (ret)
+               return ret;
+
+       if (pp_funcs->check_state_equal) {
+               if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
+                                                adev->pm.dpm.requested_ps,
+                                                &equal))
+                       equal = false;
+       }
+
+       if (equal)
+               return 0;
+
+       if (pp_funcs->set_power_state)
+               pp_funcs->set_power_state(adev->powerplay.pp_handle);
+
+       amdgpu_dpm_post_set_power_state(adev);
+
+       adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+       adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+       if (pp_funcs->force_performance_level) {
+               if (adev->pm.dpm.thermal_active) {
+                       enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
+                       /* force low perf level for thermal */
+                       pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+                       /* save the user's level */
+                       adev->pm.dpm.forced_level = level;
+               } else {
+                       /* otherwise, user selected level */
+                       pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
+               }
+       }
+
+       return 0;
+}
+
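+/*
+ * Legacy dpm clock recomputation: refresh display bandwidth, wait for the
+ * rings to idle, re-count the active displays, then re-evaluate the power
+ * state.
+ */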
+void amdgpu_legacy_dpm_compute_clocks(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i = 0;
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_bandwidth_update(adev);
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+               if (ring && ring->sched.ready)
+                       amdgpu_fence_wait_empty(ring);
+       }
+
+       amdgpu_dpm_get_active_displays(adev);
+
+       amdgpu_dpm_change_power_state_locked(adev);
+}
+
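+/*
+ * Thermal interrupt work: stay in the internal THERMAL state while the GPU
+ * temperature is at or above min_temp, drop back to the user state once it
+ * has cooled down, and recompute clocks either way.
+ */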
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+{
+       struct amdgpu_device *adev =
+               container_of(work, struct amdgpu_device,
+                            pm.dpm.thermal.work);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       /* switch to the thermal state */
+       enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+       int temp, size = sizeof(temp);
+
+       if (!adev->pm.dpm_enabled)
+               return;
+
+       if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
+                                  AMDGPU_PP_SENSOR_GPU_TEMP,
+                                  (void *)&temp,
+                                  &size)) {
+               if (temp < adev->pm.dpm.thermal.min_temp)
+                       /* switch back to the user state */
+                       dpm_state = adev->pm.dpm.user_state;
+       } else {
+               if (adev->pm.dpm.thermal.high_to_low)
+                       /* switch back to the user state */
+                       dpm_state = adev->pm.dpm.user_state;
+       }
+
+       adev->pm.dpm.thermal_active =
+               (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL);
+
+       adev->pm.dpm.state = dpm_state;
+
+       amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
new file mode 100644 (file)
index 0000000..93bd397
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __LEGACY_DPM_H__
+#define __LEGACY_DPM_H__
+
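+/* helpers shared by the legacy (pre-powerplay) dpm implementations */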
+void amdgpu_dpm_print_class_info(u32 class, u32 class2);
+void amdgpu_dpm_print_cap_info(u32 caps);
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+                               struct amdgpu_ps *rps);
+int amdgpu_get_platform_caps(struct amdgpu_device *adev);
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
+struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx);
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+void amdgpu_legacy_dpm_compute_clocks(void *handle);
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
+#endif /* __LEGACY_DPM_H__ */
similarity index 98%
rename from drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
rename to drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 81f82aa05ec287234094c02d6e4f5fde5b28dc0e..caae54487f9cb753337ec10ac2ebe9fdd66620af 100644 (file)
@@ -28,6 +28,7 @@
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
 #include "amdgpu_atombios.h"
+#include "amdgpu_dpm_internal.h"
 #include "amd_pcie.h"
 #include "sid.h"
 #include "r600_dpm.h"
@@ -37,6 +38,7 @@
 #include <linux/math64.h>
 #include <linux/seq_file.h>
 #include <linux/firmware.h>
+#include "legacy_dpm.h"
 
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -96,6 +98,19 @@ union pplib_clock_info {
        struct _ATOM_PPLIB_SI_CLOCK_INFO si;
 };
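+/* local replacements for the former amdgpu_dpm_* throttle/event-source enums */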
 
+enum si_dpm_auto_throttle_src {
+       SI_DPM_AUTO_THROTTLE_SRC_THERMAL,
+       SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum si_dpm_event_src {
+       SI_DPM_EVENT_SRC_ANALOG = 0,
+       SI_DPM_EVENT_SRC_EXTERNAL = 1,
+       SI_DPM_EVENT_SRC_DIGITAL = 2,
+       SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+       SI_DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
 static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
 {
        R600_UTC_DFLT_00,
@@ -3718,25 +3733,25 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
 {
        struct rv7xx_power_info *pi = rv770_get_pi(adev);
        bool want_thermal_protection;
-       enum amdgpu_dpm_event_src dpm_event_src;
+       enum si_dpm_event_src dpm_event_src;
 
        switch (sources) {
        case 0:
        default:
                want_thermal_protection = false;
                break;
-       case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+       case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL):
                want_thermal_protection = true;
-               dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+               dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL;
                break;
-       case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+       case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
                want_thermal_protection = true;
-               dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+               dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL;
                break;
-       case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
-             (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+       case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+             (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)):
                want_thermal_protection = true;
-               dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+               dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
                break;
        }
 
@@ -3750,7 +3765,7 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
 }
 
 static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
-                                          enum amdgpu_dpm_auto_throttle_src source,
+                                          enum si_dpm_auto_throttle_src source,
                                           bool enable)
 {
        struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -3877,6 +3892,40 @@ static int si_set_boot_state(struct amdgpu_device *adev)
 }
 #endif
 
+static int si_set_powergating_by_smu(void *handle,
+                                    uint32_t block_type,
+                                    bool gate)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
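+       /*
+        * Track UVD/VCE activity in adev->pm.dpm and recompute clocks so a
+        * matching power state can be selected.
+        */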
+       switch (block_type) {
+       case AMD_IP_BLOCK_TYPE_UVD:
+               if (!gate) {
+                       adev->pm.dpm.uvd_active = true;
+                       adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+               } else {
+                       adev->pm.dpm.uvd_active = false;
+               }
+
+               amdgpu_legacy_dpm_compute_clocks(handle);
+               break;
+       case AMD_IP_BLOCK_TYPE_VCE:
+               if (!gate) {
+                       adev->pm.dpm.vce_active = true;
+                       /* XXX select vce level based on ring/task */
+                       adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+               } else {
+                       adev->pm.dpm.vce_active = false;
+               }
+
+               amdgpu_legacy_dpm_compute_clocks(handle);
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
 static int si_set_sw_state(struct amdgpu_device *adev)
 {
        return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -4927,6 +4976,31 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
        return 0;
 }
 
+static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev,
+                                               u32 sys_mask,
+                                               enum si_pcie_gen asic_gen,
+                                               enum si_pcie_gen default_gen)
+{
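+       /*
+        * Honor an explicit ASIC gen; otherwise clamp the requested default
+        * to what the system capability mask advertises.
+        */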
+       switch (asic_gen) {
+       case SI_PCIE_GEN1:
+               return SI_PCIE_GEN1;
+       case SI_PCIE_GEN2:
+               return SI_PCIE_GEN2;
+       case SI_PCIE_GEN3:
+               return SI_PCIE_GEN3;
+       default:
+               if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+                   (default_gen == SI_PCIE_GEN3))
+                       return SI_PCIE_GEN3;
+               else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+                        (default_gen == SI_PCIE_GEN2))
+                       return SI_PCIE_GEN2;
+               else
+                       return SI_PCIE_GEN1;
+       }
+}
+
 static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
                                      SISLANDS_SMC_STATETABLE *table)
 {
@@ -4989,10 +5063,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
                                                              &table->ACPIState.level.std_vddc);
                }
                table->ACPIState.level.gen2PCIE =
-                       (u8)amdgpu_get_pcie_gen_support(adev,
-                                                       si_pi->sys_pcie_mask,
-                                                       si_pi->boot_pcie_gen,
-                                                       AMDGPU_PCIE_GEN1);
+                       (u8)si_gen_pcie_gen_support(adev,
+                                                   si_pi->sys_pcie_mask,
+                                                   si_pi->boot_pcie_gen,
+                                                   SI_PCIE_GEN1);
 
                if (si_pi->vddc_phase_shed_control)
                        si_populate_phase_shedding_value(adev,
@@ -5430,7 +5504,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
        bool gmc_pg = false;
 
        if (eg_pi->pcie_performance_request &&
-           (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+           (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID))
                level->gen2PCIE = (u8)si_pi->force_pcie_gen;
        else
                level->gen2PCIE = (u8)pl->pcie_gen;
@@ -6147,8 +6221,8 @@ static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
                WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
 }
 
-static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
-                                                     struct amdgpu_ps *amdgpu_state)
+static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+                                                 struct amdgpu_ps *amdgpu_state)
 {
        struct si_ps *state = si_get_ps(amdgpu_state);
        int i;
@@ -6177,27 +6251,27 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
                                                             struct amdgpu_ps *amdgpu_current_state)
 {
        struct si_power_info *si_pi = si_get_pi(adev);
-       enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
-       enum amdgpu_pcie_gen current_link_speed;
+       enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+       enum si_pcie_gen current_link_speed;
 
-       if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+       if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID)
                current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
        else
                current_link_speed = si_pi->force_pcie_gen;
 
-       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+       si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
        si_pi->pspp_notify_required = false;
        if (target_link_speed > current_link_speed) {
                switch (target_link_speed) {
 #if defined(CONFIG_ACPI)
-               case AMDGPU_PCIE_GEN3:
+               case SI_PCIE_GEN3:
                        if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
                                break;
-                       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
-                       if (current_link_speed == AMDGPU_PCIE_GEN2)
+                       si_pi->force_pcie_gen = SI_PCIE_GEN2;
+                       if (current_link_speed == SI_PCIE_GEN2)
                                break;
                        fallthrough;
-               case AMDGPU_PCIE_GEN2:
+               case SI_PCIE_GEN2:
                        if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
                                break;
                        fallthrough;
@@ -6217,13 +6291,13 @@ static void si_notify_link_speed_change_after_state_change(struct amdgpu_device
                                                           struct amdgpu_ps *amdgpu_current_state)
 {
        struct si_power_info *si_pi = si_get_pi(adev);
-       enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+       enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
        u8 request;
 
        if (si_pi->pspp_notify_required) {
-               if (target_link_speed == AMDGPU_PCIE_GEN3)
+               if (target_link_speed == SI_PCIE_GEN3)
                        request = PCIE_PERF_REQ_PECI_GEN3;
-               else if (target_link_speed == AMDGPU_PCIE_GEN2)
+               else if (target_link_speed == SI_PCIE_GEN2)
                        request = PCIE_PERF_REQ_PECI_GEN2;
                else
                        request = PCIE_PERF_REQ_PECI_GEN1;
@@ -6546,6 +6620,9 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
        u64 tmp64;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (!speed)
+               return -EINVAL;
+
        if (adev->pm.no_fan)
                return -ENOENT;
 
@@ -6596,10 +6673,13 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
        return 0;
 }
 
-static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
+static int si_dpm_set_fan_control_mode(void *handle, u32 mode)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (mode == U32_MAX)
+               return -EINVAL;
+
        if (mode) {
                /* stop auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6612,19 +6692,26 @@ static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
                else
                        si_fan_ctrl_set_default_mode(adev);
        }
+
+       return 0;
 }
 
-static u32 si_dpm_get_fan_control_mode(void *handle)
+static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct si_power_info *si_pi = si_get_pi(adev);
        u32 tmp;
 
+       if (!fan_mode)
+               return -EINVAL;
+
-       if (si_pi->fan_is_controlled_by_smc)
-               return 0;
+       if (si_pi->fan_is_controlled_by_smc) {
+               /* SMC owns the fan; report mode 0 as the old u32 API did */
+               *fan_mode = 0;
+               return 0;
+       }
 
        tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
-       return (tmp >> FDO_PWM_MODE_SHIFT);
+       *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+
+       return 0;
 }
 
 #if 0
@@ -6864,7 +6951,7 @@ static int si_dpm_enable(struct amdgpu_device *adev)
        si_enable_sclk_control(adev, true);
        si_start_dpm(adev);
 
-       si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+       si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
        si_thermal_start_thermal_controller(adev);
 
        ni_update_current_ps(adev, boot_ps);
@@ -6904,7 +6991,7 @@ static void si_dpm_disable(struct amdgpu_device *adev)
        si_enable_power_containment(adev, boot_ps, false);
        si_enable_smc_cac(adev, boot_ps, false);
        si_enable_spread_spectrum(adev, false);
-       si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+       si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
        si_stop_dpm(adev);
        si_reset_to_default(adev);
        si_dpm_stop_smc(adev);
@@ -6946,10 +7033,7 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
        ret = si_resume_smc(adev);
        if (ret)
                return ret;
-       ret = si_set_sw_state(adev);
-       if (ret)
-               return ret;
-       return 0;
+       return si_set_sw_state(adev);
 }
 
 static void si_set_vce_clock(struct amdgpu_device *adev,
@@ -7148,10 +7232,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
        pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
        pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
        pl->flags = le32_to_cpu(clock_info->si.ulFlags);
-       pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
-                                                  si_pi->sys_pcie_mask,
-                                                  si_pi->boot_pcie_gen,
-                                                  clock_info->si.ucPCIEGen);
+       pl->pcie_gen = si_gen_pcie_gen_support(adev,
+                                              si_pi->sys_pcie_mask,
+                                              si_pi->boot_pcie_gen,
+                                              clock_info->si.ucPCIEGen);
 
        /* patch up vddc if necessary */
        ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7318,7 +7402,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
 
        si_pi->sys_pcie_mask =
                adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
-       si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+       si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
        si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
 
        si_set_max_cu_value(adev);
@@ -7713,21 +7797,18 @@ static int si_dpm_sw_init(void *handle)
                return ret;
 
        INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
-       mutex_lock(&adev->pm.mutex);
        ret = si_dpm_init(adev);
        if (ret)
                goto dpm_failed;
        adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
        if (amdgpu_dpm == 1)
                amdgpu_pm_print_power_states(adev);
-       mutex_unlock(&adev->pm.mutex);
        DRM_INFO("amdgpu: dpm initialized\n");
 
        return 0;
 
 dpm_failed:
        si_dpm_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
        DRM_ERROR("amdgpu: dpm initialization failed\n");
        return ret;
 }
@@ -7738,9 +7819,7 @@ static int si_dpm_sw_fini(void *handle)
 
        flush_work(&adev->pm.dpm.thermal.work);
 
-       mutex_lock(&adev->pm.mutex);
        si_dpm_fini(adev);
-       mutex_unlock(&adev->pm.mutex);
 
        return 0;
 }
@@ -7754,15 +7833,13 @@ static int si_dpm_hw_init(void *handle)
        if (!amdgpu_dpm)
                return 0;
 
-       mutex_lock(&adev->pm.mutex);
        si_dpm_setup_asic(adev);
        ret = si_dpm_enable(adev);
        if (ret)
                adev->pm.dpm_enabled = false;
        else
                adev->pm.dpm_enabled = true;
-       mutex_unlock(&adev->pm.mutex);
-       amdgpu_pm_compute_clocks(adev);
+       amdgpu_legacy_dpm_compute_clocks(adev);
        return ret;
 }
 
@@ -7770,11 +7847,8 @@ static int si_dpm_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->pm.dpm_enabled) {
-               mutex_lock(&adev->pm.mutex);
+       if (adev->pm.dpm_enabled)
                si_dpm_disable(adev);
-               mutex_unlock(&adev->pm.mutex);
-       }
 
        return 0;
 }
@@ -7784,12 +7858,10 @@ static int si_dpm_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        if (adev->pm.dpm_enabled) {
-               mutex_lock(&adev->pm.mutex);
                /* disable dpm */
                si_dpm_disable(adev);
                /* reset the power state */
                adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
-               mutex_unlock(&adev->pm.mutex);
        }
        return 0;
 }
@@ -7801,16 +7873,14 @@ static int si_dpm_resume(void *handle)
 
        if (adev->pm.dpm_enabled) {
                /* asic init will reset to the boot state */
-               mutex_lock(&adev->pm.mutex);
                si_dpm_setup_asic(adev);
                ret = si_dpm_enable(adev);
                if (ret)
                        adev->pm.dpm_enabled = false;
                else
                        adev->pm.dpm_enabled = true;
-               mutex_unlock(&adev->pm.mutex);
                if (adev->pm.dpm_enabled)
-                       amdgpu_pm_compute_clocks(adev);
+                       amdgpu_legacy_dpm_compute_clocks(adev);
        }
        return 0;
 }
@@ -8055,6 +8125,7 @@ static const struct amd_pm_funcs si_dpm_funcs = {
        .print_power_state = &si_dpm_print_power_state,
        .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
        .force_performance_level = &si_dpm_force_performance_level,
+       .set_powergating_by_smu = &si_set_powergating_by_smu,
        .vblank_too_short = &si_dpm_vblank_too_short,
        .set_fan_control_mode = &si_dpm_set_fan_control_mode,
        .get_fan_control_mode = &si_dpm_get_fan_control_mode,
@@ -8063,6 +8134,7 @@ static const struct amd_pm_funcs si_dpm_funcs = {
        .check_state_equal = &si_check_state_equal,
        .get_vce_clock_state = amdgpu_get_vce_clock_state,
        .read_sensor = &si_dpm_read_sensor,
+       .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
 };
 
 static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
similarity index 99%
rename from drivers/gpu/drm/amd/pm/powerplay/si_dpm.h
rename to drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
index bc0be6818e218dc1c3e01f5c9c40d0ba542da840..11cb7874a6bbd792fe567c20ea714ad4cb03ec82 100644 (file)
@@ -595,13 +595,20 @@ struct rv7xx_power_info {
        RV770_SMC_STATETABLE smc_statetable;
 };
 
+enum si_pcie_gen {
+       SI_PCIE_GEN1 = 0,
+       SI_PCIE_GEN2 = 1,
+       SI_PCIE_GEN3 = 2,
+       SI_PCIE_GEN_INVALID = 0xffff
+};
+
 struct rv7xx_pl {
        u32 sclk;
        u32 mclk;
        u16 vddc;
        u16 vddci; /* eg+ only */
        u32 flags;
-       enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+       enum si_pcie_gen pcie_gen; /* si+ only */
 };
 
 struct rv7xx_ps {
@@ -967,9 +974,9 @@ struct si_power_info {
        struct si_ulv_param ulv;
        u32 max_cu;
        /* pcie gen */
-       enum amdgpu_pcie_gen force_pcie_gen;
-       enum amdgpu_pcie_gen boot_pcie_gen;
-       enum amdgpu_pcie_gen acpi_pcie_gen;
+       enum si_pcie_gen force_pcie_gen;
+       enum si_pcie_gen boot_pcie_gen;
+       enum si_pcie_gen acpi_pcie_gen;
        u32 sys_pcie_mask;
        /* flags */
        bool enable_dte;
index 0fb114adc79f61398338f5d999804a0bd767683f..795a3624cbbf7ddf779da7ba69b5925dbcca010a 100644 (file)
@@ -30,10 +30,6 @@ include $(AMD_POWERPLAY)
 
 POWER_MGR-y = amd_powerplay.o
 
-POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
-
-POWER_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
-
 AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR-y))
 
 AMD_POWERPLAY_FILES += $(AMD_PP_POWER)
index 3ab67b232cd492e0dd8761ebcb80153533512821..a2da46bf3985d0b22ed9eeb9526ac7fb2042ba14 100644 (file)
@@ -31,7 +31,8 @@
 #include "power_state.h"
 #include "amdgpu.h"
 #include "hwmgr.h"
-
+#include "amdgpu_dpm_internal.h"
+#include "amdgpu_display.h"
 
 static const struct amd_pm_funcs pp_dpm_funcs;
 
@@ -49,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
        hwmgr->adev = adev;
        hwmgr->not_vf = !amdgpu_sriov_vf(adev);
        hwmgr->device = amdgpu_cgs_create_device(adev);
-       mutex_init(&hwmgr->smu_lock);
        mutex_init(&hwmgr->msg_lock);
        hwmgr->chip_family = adev->family;
        hwmgr->chip_id = adev->asic_type;
@@ -177,12 +177,9 @@ static int pp_late_init(void *handle)
        struct amdgpu_device *adev = handle;
        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
 
-       if (hwmgr && hwmgr->pm_en) {
-               mutex_lock(&hwmgr->smu_lock);
+       if (hwmgr && hwmgr->pm_en)
                hwmgr_handle_task(hwmgr,
                                        AMD_PP_TASK_COMPLETE_INIT, NULL);
-               mutex_unlock(&hwmgr->smu_lock);
-       }
        if (adev->pm.smu_prv_buffer_size != 0)
                pp_reserve_vram_for_smu(adev);
 
@@ -322,12 +319,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
                if (*level & profile_mode_mask) {
                        hwmgr->saved_dpm_level = hwmgr->dpm_level;
                        hwmgr->en_umd_pstate = true;
-                       amdgpu_device_ip_set_powergating_state(hwmgr->adev,
-                                       AMD_IP_BLOCK_TYPE_GFX,
-                                       AMD_PG_STATE_UNGATE);
-                       amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                                               AMD_IP_BLOCK_TYPE_GFX,
-                                               AMD_CG_STATE_UNGATE);
                }
        } else {
                /* exit umd pstate, restore level, enable gfx cg*/
@@ -335,12 +326,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr  *hwmgr,
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = hwmgr->saved_dpm_level;
                        hwmgr->en_umd_pstate = false;
-                       amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                                       AMD_IP_BLOCK_TYPE_GFX,
-                                       AMD_CG_STATE_GATE);
-                       amdgpu_device_ip_set_powergating_state(hwmgr->adev,
-                                       AMD_IP_BLOCK_TYPE_GFX,
-                                       AMD_PG_STATE_GATE);
                }
        }
 }
@@ -356,11 +341,9 @@ static int pp_dpm_force_performance_level(void *handle,
        if (level == hwmgr->dpm_level)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        pp_dpm_en_umd_pstate(hwmgr, &level);
        hwmgr->request_dpm_level = level;
        hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -369,21 +352,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
                                                                void *handle)
 {
        struct pp_hwmgr *hwmgr = handle;
-       enum amd_dpm_forced_level level;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       level = hwmgr->dpm_level;
-       mutex_unlock(&hwmgr->smu_lock);
-       return level;
+       return hwmgr->dpm_level;
 }
 
 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
 {
        struct pp_hwmgr *hwmgr = handle;
-       uint32_t clk = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return 0;
@@ -392,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
-       mutex_lock(&hwmgr->smu_lock);
-       clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
-       mutex_unlock(&hwmgr->smu_lock);
-       return clk;
+       return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
 }
 
 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
 {
        struct pp_hwmgr *hwmgr = handle;
-       uint32_t clk = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return 0;
@@ -410,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
-       mutex_lock(&hwmgr->smu_lock);
-       clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
-       mutex_unlock(&hwmgr->smu_lock);
-       return clk;
+       return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 }
 
 static void pp_dpm_powergate_vce(void *handle, bool gate)
@@ -427,9 +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return;
        }
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
-       mutex_unlock(&hwmgr->smu_lock);
 }
 
 static void pp_dpm_powergate_uvd(void *handle, bool gate)
@@ -443,25 +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return;
        }
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
-       mutex_unlock(&hwmgr->smu_lock);
 }
 
 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
                enum amd_pm_state_type *user_state)
 {
-       int ret = 0;
        struct pp_hwmgr *hwmgr = handle;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr_handle_task(hwmgr, task_id, user_state);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return hwmgr_handle_task(hwmgr, task_id, user_state);
 }
 
 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
@@ -473,8 +435,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
        if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-
        state = hwmgr->current_ps;
 
        switch (state->classification.ui_label) {
@@ -494,115 +454,107 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
                        pm_type = POWER_STATE_TYPE_DEFAULT;
                break;
        }
-       mutex_unlock(&hwmgr->smu_lock);
 
        return pm_type;
 }
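+/*
+ * The fan control hooks report -EOPNOTSUPP when power management or the
+ * corresponding hwmgr callback is unavailable and -EINVAL for invalid
+ * arguments.
+ */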
 
-static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
        struct pp_hwmgr *hwmgr = handle;
 
        if (!hwmgr || !hwmgr->pm_en)
-               return;
+               return -EOPNOTSUPP;
+
+       if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
+               return -EOPNOTSUPP;
+
+       if (mode == U32_MAX)
+               return -EINVAL;
 
-       if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
-               pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return;
-       }
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
-       mutex_unlock(&hwmgr->smu_lock);
+
+       return 0;
 }
 
-static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
 {
        struct pp_hwmgr *hwmgr = handle;
-       uint32_t mode = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
-               return 0;
+               return -EOPNOTSUPP;
 
-       if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
-               pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return 0;
-       }
-       mutex_lock(&hwmgr->smu_lock);
-       mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
-       mutex_unlock(&hwmgr->smu_lock);
-       return mode;
+       if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
+               return -EOPNOTSUPP;
+
+       if (!fan_mode)
+               return -EINVAL;
+
+       *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+       return 0;
 }
 
 static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
+               return -EOPNOTSUPP;
+
+       if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
+               return -EOPNOTSUPP;
+
+       if (speed == U32_MAX)
                return -EINVAL;
 
-       if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
-               pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return 0;
-       }
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
 }
 
 static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
-       if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
-               pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return 0;
-       }
+       if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
+               return -EOPNOTSUPP;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       if (!speed)
+               return -EINVAL;
+
+       return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
 }
 
 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+               return -EOPNOTSUPP;
+
+       if (!rpm)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
 }
 
 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
+               return -EOPNOTSUPP;
+
+       if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
+               return -EOPNOTSUPP;
+
+       if (rpm == U32_MAX)
                return -EINVAL;
 
-       if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
-               pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return 0;
-       }
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
 }
 
 static int pp_dpm_get_pp_num_states(void *handle,
@@ -616,8 +568,6 @@ static int pp_dpm_get_pp_num_states(void *handle,
        if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-
        data->nums = hwmgr->num_ps;
 
        for (i = 0; i < hwmgr->num_ps; i++) {
@@ -640,23 +590,18 @@ static int pp_dpm_get_pp_num_states(void *handle,
                                data->states[i] = POWER_STATE_TYPE_DEFAULT;
                }
        }
-       mutex_unlock(&hwmgr->smu_lock);
        return 0;
 }
 
 static int pp_dpm_get_pp_table(void *handle, char **table)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int size = 0;
 
        if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
        *table = (char *)hwmgr->soft_pp_table;
-       size = hwmgr->soft_pp_table_size;
-       mutex_unlock(&hwmgr->smu_lock);
-       return size;
+       return hwmgr->soft_pp_table_size;
 }
 
 static int amd_powerplay_reset(void *handle)
@@ -683,13 +628,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
        if (!hwmgr->hardcode_pp_table) {
                hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
                                                   hwmgr->soft_pp_table_size,
                                                   GFP_KERNEL);
                if (!hwmgr->hardcode_pp_table)
-                       goto err;
+                       return -ENOMEM;
        }
 
        memcpy(hwmgr->hardcode_pp_table, buf, size);
@@ -698,17 +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 
        ret = amd_powerplay_reset(handle);
        if (ret)
-               goto err;
+               return ret;
 
-       if (hwmgr->hwmgr_func->avfs_control) {
+       if (hwmgr->hwmgr_func->avfs_control)
                ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
-               if (ret)
-                       goto err;
-       }
-       mutex_unlock(&hwmgr->smu_lock);
-       return 0;
-err:
-       mutex_unlock(&hwmgr->smu_lock);
+
        return ret;
 }
 
@@ -716,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle,
                enum pp_clock_type type, uint32_t mask)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -731,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle,
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
 }
 
 static int pp_dpm_print_clock_levels(void *handle,
                enum pp_clock_type type, char *buf)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -750,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle,
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 }
 
 static int pp_dpm_get_sclk_od(void *handle)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -768,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
 }
 
 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -787,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
                return 0;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
 }
 
 static int pp_dpm_get_mclk_od(void *handle)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -805,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
 }
 
 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -823,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
                pr_info_ratelimited("%s was not implemented.\n", __func__);
                return 0;
        }
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
 }
 
 static int pp_dpm_read_sensor(void *handle, int idx,
                              void *value, int *size)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en || !value)
                return -EINVAL;
@@ -852,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
                *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
                return 0;
        default:
-               mutex_lock(&hwmgr->smu_lock);
-               ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
-               mutex_unlock(&hwmgr->smu_lock);
-               return ret;
+               return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
        }
 }
 
@@ -875,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
 static int pp_get_power_profile_mode(void *handle, char *buf)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret;
 
        if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
                return -EOPNOTSUPP;
        if (!buf)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
 }
 
 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = -EOPNOTSUPP;
 
        if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
-               return ret;
+               return -EOPNOTSUPP;
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                pr_debug("power profile setting is for manual dpm mode only.\n");
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
 }
 
 static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
@@ -969,8 +871,6 @@ static int pp_dpm_switch_power_profile(void *handle,
        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-
        if (!en) {
                hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
                index = fls(hwmgr->workload_mask);
@@ -985,15 +885,12 @@ static int pp_dpm_switch_power_profile(void *handle,
 
        if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
                hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
-                       if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
-                               mutex_unlock(&hwmgr->smu_lock);
+                       if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
                                return -EINVAL;
-                       }
        }
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
                hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1023,10 +920,8 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
        if (limit > max_power_limit)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
        hwmgr->power_limit = limit;
-       mutex_unlock(&hwmgr->smu_lock);
        return 0;
 }
 
@@ -1043,8 +938,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
        if (power_type != PP_PWR_TYPE_SUSTAINED)
                return -EOPNOTSUPP;
 
-       mutex_lock(&hwmgr->smu_lock);
-
        switch (pp_limit_level) {
                case PP_PWR_LIMIT_CURRENT:
                        *limit = hwmgr->power_limit;
@@ -1064,8 +957,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
                        break;
        }
 
-       mutex_unlock(&hwmgr->smu_lock);
-
        return ret;
 }
 
@@ -1077,9 +968,7 @@ static int pp_display_configuration_change(void *handle,
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
        phm_store_dal_configuration_data(hwmgr, display_config);
-       mutex_unlock(&hwmgr->smu_lock);
        return 0;
 }
 
@@ -1087,15 +976,11 @@ static int pp_get_display_power_level(void *handle,
                struct amd_pp_simple_clock_info *output)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en ||!output)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = phm_get_dal_power_level(hwmgr, output);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return phm_get_dal_power_level(hwmgr, output);
 }
 
 static int pp_get_current_clocks(void *handle,
@@ -1109,8 +994,6 @@ static int pp_get_current_clocks(void *handle,
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-
        phm_get_dal_power_level(hwmgr, &simple_clocks);
 
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -1123,7 +1006,6 @@ static int pp_get_current_clocks(void *handle,
 
        if (ret) {
                pr_debug("Error in phm_get_clock_info \n");
-               mutex_unlock(&hwmgr->smu_lock);
                return -EINVAL;
        }
 
@@ -1146,14 +1028,12 @@ static int pp_get_current_clocks(void *handle,
                clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
                clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
        }
-       mutex_unlock(&hwmgr->smu_lock);
        return 0;
 }
 
 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -1161,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
        if (clocks == NULL)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = phm_get_clock_by_type(hwmgr, type, clocks);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return phm_get_clock_by_type(hwmgr, type, clocks);
 }
 
 static int pp_get_clock_by_type_with_latency(void *handle,
@@ -1172,15 +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle,
                struct pp_clock_levels_with_latency *clocks)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en ||!clocks)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
 }
 
 static int pp_get_clock_by_type_with_voltage(void *handle,
@@ -1188,50 +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
                struct pp_clock_levels_with_voltage *clocks)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en ||!clocks)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-
-       ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
-
-       mutex_unlock(&hwmgr->smu_lock);
-       return ret;
+       return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
 }
 
 static int pp_set_watermarks_for_clocks_ranges(void *handle,
                void *clock_ranges)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
-                       clock_ranges);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return phm_set_watermarks_for_clocks_ranges(hwmgr,
+                                                   clock_ranges);
 }
 
 static int pp_display_clock_voltage_request(void *handle,
                struct pp_display_clock_request *clock)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en ||!clock)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = phm_display_clock_voltage_request(hwmgr, clock);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return phm_display_clock_voltage_request(hwmgr, clock);
 }
 
 static int pp_get_display_mode_validation_clocks(void *handle,
@@ -1245,12 +1102,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
 
        clocks->level = PP_DAL_POWERLEVEL_7;
 
-       mutex_lock(&hwmgr->smu_lock);
-
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
                ret = phm_get_max_high_clocks(hwmgr, clocks);
 
-       mutex_unlock(&hwmgr->smu_lock);
        return ret;
 }
 
@@ -1362,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1380,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
             hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1399,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1418,9 +1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1437,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1447,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
 static int pp_set_active_display_count(void *handle, uint32_t count)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = phm_set_active_display_count(hwmgr, count);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return phm_set_active_display_count(hwmgr, count);
 }
 
 static int pp_get_asic_baco_capability(void *handle, bool *cap)
@@ -1471,9 +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
                !hwmgr->hwmgr_func->get_asic_baco_capability)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1488,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
        if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1506,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
                !hwmgr->hwmgr_func->set_asic_baco_state)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1516,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle, int state)
 static int pp_get_ppfeature_status(void *handle, char *buf)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en || !buf)
                return -EINVAL;
@@ -1526,17 +1358,12 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
 }
 
 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -1546,17 +1373,12 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
 }
 
 static int pp_asic_reset_mode_2(void *handle)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -1566,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
 }
 
 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
 {
        struct pp_hwmgr *hwmgr = handle;
-       int ret = 0;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -1586,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
-       ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return ret;
+       return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
 }
 
 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
@@ -1603,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
        if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1620,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
        if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
                return 0;
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
@@ -1630,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
 {
        struct pp_hwmgr *hwmgr = handle;
-       ssize_t size;
 
        if (!hwmgr)
                return -EINVAL;
@@ -1638,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
        if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
                return -EOPNOTSUPP;
 
-       mutex_lock(&hwmgr->smu_lock);
-       size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
-       mutex_unlock(&hwmgr->smu_lock);
-
-       return size;
+       return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
 }
 
 static int pp_gfx_state_change_set(void *handle, uint32_t state)
@@ -1657,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
                return -EINVAL;
        }
 
-       mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
-       mutex_unlock(&hwmgr->smu_lock);
        return 0;
 }
 
@@ -1673,16 +1475,49 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
 
        *addr = NULL;
        *size = 0;
-       mutex_lock(&hwmgr->smu_lock);
        if (adev->pm.smu_prv_buffer) {
                amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
                *size = adev->pm.smu_prv_buffer_size;
        }
-       mutex_unlock(&hwmgr->smu_lock);
 
        return 0;
 }
 
+static void pp_pm_compute_clocks(void *handle)
+{
+       struct pp_hwmgr *hwmgr = handle;
+       struct amdgpu_device *adev = hwmgr->adev;
+       int i = 0;
+
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_bandwidth_update(adev);
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+               struct amdgpu_ring *ring = adev->rings[i];
+               if (ring && ring->sched.ready)
+                       amdgpu_fence_wait_empty(ring);
+       }
+
+       if (!amdgpu_device_has_dc_support(adev)) {
+               amdgpu_dpm_get_active_displays(adev);
+               adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+               adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+               adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+               /* mclk switching is problematic with refresh rates
+                * over 120 Hz on the non-DC code paths.
+                */
+               if (adev->pm.pm_display_cfg.vrefresh > 120)
+                       adev->pm.pm_display_cfg.min_vblank_time = 0;
+
+               pp_display_configuration_change(handle,
+                                               &adev->pm.pm_display_cfg);
+       }
+
+       pp_dpm_dispatch_tasks(handle,
+                             AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+                             NULL);
+}
+
 static const struct amd_pm_funcs pp_dpm_funcs = {
        .load_firmware = pp_dpm_load_fw,
        .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1747,4 +1582,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .get_gpu_metrics = pp_get_gpu_metrics,
        .gfx_state_change_set = pp_gfx_state_change_set,
        .get_smu_prv_buf_details = pp_get_prv_buffer_details,
+       .pm_compute_clocks = pp_pm_compute_clocks,
 };
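The hunks above drop hwmgr->smu_lock from every pp_* entry point (serialization now happens a layer up) and add pp_pm_compute_clocks(), which folds the display bandwidth update, fence draining, and the display-config dispatch into a single amd_pm_funcs callback. A minimal sketch of how a caller would reach the new hook through the function table; the wrapper name below is illustrative, not taken from this patch:

        /* Hedged sketch: dispatching the new pm_compute_clocks hook via the
         * amd_pm_funcs table. A wrapper like this is assumed to live in the
         * amdgpu dpm layer; the exact helper in the tree may differ. */
        static void example_pm_compute_clocks(struct amdgpu_device *adev)
        {
                const struct amd_pm_funcs *funcs = adev->powerplay.pp_funcs;

                if (funcs && funcs->pm_compute_clocks)
                        funcs->pm_compute_clocks(adev->powerplay.pp_handle);
        }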
index cd99db0dc2be95607416d7583324f5ae4160d648..a1e11037831abe72491c2da6d33c0998c773374d 100644 (file)
@@ -3295,10 +3295,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
                        request_ps->classification.ui_label);
        data->mclk_ignore_signal = false;
 
-       PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
-                                "VI should always have 2 performance levels",
-                               );
-
        max_limits = adev->pm.ac_power ?
                        &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
                        &(hwmgr->dyn_state.max_clock_voltage_on_dc);
index 03bf8f0692228d48de0b9776fb3fe9205f7ee2ce..b50fd4a4a3d1ae1d45ad1b4d2ce61ede3b773eb3 100644 (file)
@@ -1950,9 +1950,12 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
 }
 
+#define WIDTH_4K               3840
+
 static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 {
        struct smu8_hwmgr *data = hwmgr->backend;
+       struct amdgpu_device *adev = hwmgr->adev;
 
        data->uvd_power_gated = bgate;
 
@@ -1976,6 +1979,12 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
                smu8_dpm_update_uvd_dpm(hwmgr, false);
        }
 
+       /* enable/disable Low Memory PState for UVD (4k videos) */
+       if (adev->asic_type == CHIP_STONEY &&
+           adev->uvd.decode_image_width >= WIDTH_4K)
+               smu8_nbdpm_pstate_enable_disable(hwmgr,
+                                                bgate,
+                                                true);
 }
 
 static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
@@ -2037,7 +2046,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
        .power_state_set = smu8_set_power_state_tasks,
        .dynamic_state_management_disable = smu8_disable_dpm_tasks,
        .notify_cac_buffer_info = smu8_notify_cac_buffer_info,
-       .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
        .get_thermal_temperature_range = smu8_get_thermal_temperature_range,
 };
 
similarity index 99%
rename from drivers/gpu/drm/amd/pm/inc/hwmgr.h
rename to drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 8ed01071fe5ade69cb992ad7d58f970745e3b3df..4f7f2f455301a9e1c02a9c655dc3b3f4fe77baa1 100644 (file)
@@ -331,9 +331,6 @@ struct pp_hwmgr_func {
                                        uint32_t mc_addr_low,
                                        uint32_t mc_addr_hi,
                                        uint32_t size);
-       int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
-                                       bool enable,
-                                       bool lock);
        int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
                                        struct PP_TemperatureRange *range);
        int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -751,7 +748,6 @@ struct pp_hwmgr {
        bool not_vf;
        bool pm_en;
        bool pp_one_vf;
-       struct mutex smu_lock;
        struct mutex msg_lock;
 
        uint32_t pp_table_version;
similarity index 99%
rename from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h
rename to drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h
index eb0f79f9c8764e4dafab5960852dcdde765e9362..701aae598b58c9edfbfc469a040e10fa34cc0671 100644 (file)
@@ -121,7 +121,7 @@ typedef struct SMU_Task SMU_Task;
 
 struct TOC {
     uint8_t JobList[NUM_JOBLIST_ENTRIES];
-    SMU_Task tasks[1];
+    SMU_Task tasks[];
 };
 
 // META DATA COMMAND Definitions
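Replacing the one-element array with a C99 flexible array member lets the compiler and fortify checks see the real bounds, and stops sizeof(struct TOC) from counting a phantom task. A hedged sketch of how such a TOC would be sized on allocation (num_tasks and the allocation site are assumptions for illustration, not part of this patch):

        /* struct_size() from <linux/overflow.h> returns sizeof(struct TOC)
         * plus num_tasks trailing SMU_Task entries, with overflow checking,
         * now that the header no longer includes a fake one-element array. */
        struct TOC *toc = kzalloc(struct_size(toc, tasks, num_tasks),
                                  GFP_KERNEL);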
index 93a1c7248e26ac231e7165a8631312a623a8e0f8..5ca3c422f7d4f09940b8c8c670a7497af8651c01 100644 (file)
@@ -208,6 +208,7 @@ static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
 
 static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
+       struct amdgpu_device *adev = hwmgr->adev;
        int ret;
 
        cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
@@ -218,7 +219,8 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
        if (ret != 1)
-               pr_info("\n failed to send message %x ret is %d\n",  msg, ret);
+               dev_info(adev->dev,
+                       "failed to send message %x ret is %d\n", msg,ret);
 
        return 0;
 }
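This hunk and the ones that follow convert bare pr_info()/printk() calls to the dev_* helpers, which prefix each message with the device name and so disambiguate logs on multi-GPU systems; they also drop the stray leading "\n". An illustration of the difference (the example log prefixes in the comments are assumed, not taken from this patch):

        dev_info(adev->dev, "failed to send message %x, ret is %d\n", msg, ret);
        /* -> "amdgpu 0000:0b:00.0: failed to send message 5, ret is 0" */

        pr_info("failed to send message %x, ret is %d\n", msg, ret);
        /* -> "failed to send message 5, ret is 0" (no device context) */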
index 47b34c6ca924ea17e41bf1fb48a25bcf36c41f89..88a5641465dcf5efbcbd87f9e35b79ee0ec7c797 100644 (file)
@@ -87,7 +87,7 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
 
        if (smu10_wait_for_response(hwmgr) == 0)
-               printk("Failed to send Message %x.\n", msg);
+               dev_err(adev->dev, "Failed to send Message %x.\n", msg);
 
        return 0;
 }
@@ -108,7 +108,7 @@ static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
 
        if (smu10_wait_for_response(hwmgr) == 0)
-               printk("Failed to send Message %x.\n", msg);
+               dev_err(adev->dev, "Failed to send Message %x.\n", msg);
 
        return 0;
 }
index aae25243eb10d33a5ea84903ea790e2daa0213d3..5a010cd3830373731abf594a190054ac998d035f 100644 (file)
@@ -165,6 +165,7 @@ bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
 
 int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 {
+       struct amdgpu_device *adev = hwmgr->adev;
        int ret;
 
        PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -172,9 +173,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
        if (ret == 0xFE)
-               pr_debug("last message was not supported\n");
+               dev_dbg(adev->dev, "last message was not supported\n");
        else if (ret != 1)
-               pr_info("\n last message was failed ret is %d\n", ret);
+               dev_info(adev->dev,
+                       "\nlast message was failed ret is %d\n", ret);
 
        cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
        cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
@@ -184,9 +186,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
        ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
 
        if (ret == 0xFE)
-               pr_debug("message %x was not supported\n", msg);
+               dev_dbg(adev->dev, "message %x was not supported\n", msg);
        else if (ret != 1)
-               pr_info("\n failed to send message %x ret is %d \n",  msg, ret);
+               dev_dbg(adev->dev,
+                       "failed to send message %x ret is %d \n",  msg, ret);
 
        return 0;
 }
index 23e5de3c4ec166fc743b31f00fa1b118d9313425..8c9bf4940dc15b265a6da4af981aaf31c09047db 100644 (file)
@@ -126,7 +126,7 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 
        ret = smu9_wait_for_response(hwmgr);
        if (ret != 1)
-               pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+               dev_err(adev->dev, "Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
 
        return 0;
 }
index 741fbc87467f9236730723f9231c094d44c34d48..a5c95b180672970cba93dfdf3efe09a76c381861 100644 (file)
@@ -115,7 +115,7 @@ static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
 
        ret = vega20_wait_for_response(hwmgr);
        if (ret != PPSMC_Result_OK)
-               pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
+               dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);
 
        return (ret == PPSMC_Result_OK) ? 0 : -EIO;
 }
@@ -143,7 +143,7 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
        ret = vega20_wait_for_response(hwmgr);
        if (ret != PPSMC_Result_OK)
-               pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
+               dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);
 
        return (ret == PPSMC_Result_OK) ? 0 : -EIO;
 }
@@ -520,7 +520,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
 
-       ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
+       ret = smu_v11_0_i2c_control_init(adev);
        if (ret)
                goto err4;
 
@@ -558,7 +558,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        struct amdgpu_device *adev = hwmgr->adev;
 
-       smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);
+       smu_v11_0_i2c_control_fini(adev);
 
        if (priv) {
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
index d93d28c1af95ba85e73281f0a92bb25b326cc60b..e846231412bc87cae627a6872d9480853048e797 100644 (file)
@@ -55,11 +55,10 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
                                   uint32_t mask);
 static int smu_handle_task(struct smu_context *smu,
                           enum amd_dpm_forced_level level,
-                          enum amd_pp_task task_id,
-                          bool lock_needed);
+                          enum amd_pp_task task_id);
 static int smu_reset(struct smu_context *smu);
 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
-static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_fan_control_mode(void *handle, u32 value);
 static int smu_set_power_limit(void *handle, uint32_t limit);
 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
@@ -68,49 +67,32 @@ static int smu_sys_get_pp_feature_mask(void *handle,
                                       char *buf)
 {
        struct smu_context *smu = handle;
-       int size = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
-       size = smu_get_pp_feature_mask(smu, buf);
-
-       mutex_unlock(&smu->mutex);
-
-       return size;
+       return smu_get_pp_feature_mask(smu, buf);
 }
 
 static int smu_sys_set_pp_feature_mask(void *handle,
                                       uint64_t new_mask)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
-       ret = smu_set_pp_feature_mask(smu, new_mask);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu_set_pp_feature_mask(smu, new_mask);
 }
 
-int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
+int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
 {
-       int ret = 0;
-       struct smu_context *smu = &adev->smu;
+       if (!smu->ppt_funcs->get_gfx_off_status)
+               return -EINVAL;
 
-       if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
-               *value = smu_get_gfx_off_status(smu);
-       else
-               ret = -EINVAL;
+       *value = smu_get_gfx_off_status(smu);
 
-       return ret;
+       return 0;
 }
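smu_get_status_gfxoff() now takes the smu_context directly and leaves the software-SMU check to its caller, so the helper itself only validates that the backend implements get_gfx_off_status. A hedged sketch of the adjusted call site (assumed caller code, not from this patch):

        uint32_t gfxoff;

        /* The is_support_sw_smu() guard moves out of the helper. */
        if (is_support_sw_smu(adev) &&
            !smu_get_status_gfxoff(adev->powerplay.pp_handle, &gfxoff))
                dev_dbg(adev->dev, "gfxoff status: %u\n", gfxoff);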
 
 int smu_set_soft_freq_range(struct smu_context *smu,
@@ -120,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
 {
        int ret = 0;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->set_soft_freq_limited_range)
                ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
                                                                  clk_type,
                                                                  min,
                                                                  max);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -138,21 +116,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
                           uint32_t *min,
                           uint32_t *max)
 {
-       int ret = 0;
+       int ret = -ENOTSUPP;
 
        if (!min && !max)
                return -EINVAL;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->get_dpm_ultimate_freq)
                ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
                                                            clk_type,
                                                            min,
                                                            max);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -184,8 +158,8 @@ static u32 smu_get_sclk(void *handle, bool low)
        return clk_freq * 100;
 }
 
-static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
-                                        bool enable)
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+                                 bool enable)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -204,24 +178,8 @@ static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
        return ret;
 }
 
-static int smu_dpm_set_vcn_enable(struct smu_context *smu,
-                                 bool enable)
-{
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
-       int ret = 0;
-
-       mutex_lock(&power_gate->vcn_gate_lock);
-
-       ret = smu_dpm_set_vcn_enable_locked(smu, enable);
-
-       mutex_unlock(&power_gate->vcn_gate_lock);
-
-       return ret;
-}
-
-static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
-                                         bool enable)
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+                                  bool enable)
 {
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -240,22 +198,6 @@ static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
        return ret;
 }
 
-static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
-                                  bool enable)
-{
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
-       int ret = 0;
-
-       mutex_lock(&power_gate->jpeg_gate_lock);
-
-       ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
-
-       mutex_unlock(&power_gate->jpeg_gate_lock);
-
-       return ret;
-}
-
 /**
  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
  *
@@ -410,7 +352,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
        if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
            smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
                ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
-               if (ret) {
+               if (ret != -EOPNOTSUPP) {
                        smu->user_dpm_profile.fan_speed_pwm = 0;
                        smu->user_dpm_profile.fan_speed_rpm = 0;
                        smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
@@ -419,13 +361,13 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
 
                if (smu->user_dpm_profile.fan_speed_pwm) {
                        ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
-                       if (ret)
+                       if (ret != -EOPNOTSUPP)
                                dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
                }
 
                if (smu->user_dpm_profile.fan_speed_rpm) {
                        ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
-                       if (ret)
+                       if (ret != -EOPNOTSUPP)
                                dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
                }
        }
@@ -471,10 +413,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
 
 bool is_support_cclk_dpm(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
-
-       if (!is_support_sw_smu(adev))
-               return false;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
        if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
                return false;
@@ -488,7 +427,6 @@ static int smu_sys_get_pp_table(void *handle,
 {
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
-       uint32_t powerplay_table_size;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
@@ -496,18 +434,12 @@ static int smu_sys_get_pp_table(void *handle,
        if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
                return -EINVAL;
 
-       mutex_lock(&smu->mutex);
-
        if (smu_table->hardcode_pptable)
                *table = smu_table->hardcode_pptable;
        else
                *table = smu_table->power_play_table;
 
-       powerplay_table_size = smu_table->power_play_table_size;
-
-       mutex_unlock(&smu->mutex);
-
-       return powerplay_table_size;
+       return smu_table->power_play_table_size;
 }
 
 static int smu_sys_set_pp_table(void *handle,
@@ -527,12 +459,10 @@ static int smu_sys_set_pp_table(void *handle,
                return -EIO;
        }
 
-       mutex_lock(&smu->mutex);
-       if (!smu_table->hardcode_pptable)
-               smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
        if (!smu_table->hardcode_pptable) {
-               ret = -ENOMEM;
-               goto failed;
+               smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+               if (!smu_table->hardcode_pptable)
+                       return -ENOMEM;
        }
 
        memcpy(smu_table->hardcode_pptable, buf, size);
@@ -551,8 +481,6 @@ static int smu_sys_set_pp_table(void *handle,
 
        smu->uploading_custom_pp_table = false;
 
-failed:
-       mutex_unlock(&smu->mutex);
        return ret;
 }
 
@@ -578,7 +506,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
 
 static int smu_set_funcs(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
        if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
                smu->od_enabled = true;
@@ -630,13 +558,15 @@ static int smu_set_funcs(struct amdgpu_device *adev)
 static int smu_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu;
+
+       smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
+       if (!smu)
+               return -ENOMEM;
 
        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
-       mutex_init(&smu->mutex);
-       mutex_init(&smu->smu_baco.mutex);
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;
        smu->user_dpm_profile.fan_mode = -1;
@@ -657,32 +587,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
        if (!smu->ppt_funcs->set_default_dpm_table)
                return 0;
 
-       mutex_lock(&power_gate->vcn_gate_lock);
-       mutex_lock(&power_gate->jpeg_gate_lock);
-
        vcn_gate = atomic_read(&power_gate->vcn_gated);
        jpeg_gate = atomic_read(&power_gate->jpeg_gated);
 
-       ret = smu_dpm_set_vcn_enable_locked(smu, true);
+       ret = smu_dpm_set_vcn_enable(smu, true);
        if (ret)
-               goto err0_out;
+               return ret;
 
-       ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+       ret = smu_dpm_set_jpeg_enable(smu, true);
        if (ret)
-               goto err1_out;
+               goto err_out;
 
        ret = smu->ppt_funcs->set_default_dpm_table(smu);
        if (ret)
                dev_err(smu->adev->dev,
                        "Failed to setup default dpm clock tables!\n");
 
-       smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
-err1_out:
-       smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
-err0_out:
-       mutex_unlock(&power_gate->jpeg_gate_lock);
-       mutex_unlock(&power_gate->vcn_gate_lock);
-
+       smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+err_out:
+       smu_dpm_set_vcn_enable(smu, !vcn_gate);
        return ret;
 }
 
@@ -690,7 +613,7 @@ err0_out:
 static int smu_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
        smu_set_fine_grain_gfx_freq_parameters(smu);
@@ -736,10 +659,9 @@ static int smu_late_init(void *handle)
 
        smu_get_fan_parameters(smu);
 
-       smu_handle_task(&adev->smu,
+       smu_handle_task(smu,
                        smu->smu_dpm.dpm_level,
-                       AMD_PP_TASK_COMPLETE_INIT,
-                       false);
+                       AMD_PP_TASK_COMPLETE_INIT);
 
        smu_restore_dpm_user_profile(smu);
 
@@ -964,7 +886,7 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
        if (ret)
                return ret;
 
-       ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
+       ret = smu_i2c_init(smu);
        if (ret)
                return ret;
 
@@ -975,7 +897,7 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
 {
        int ret;
 
-       smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
+       smu_i2c_fini(smu);
 
        smu_free_dummy_read_table(smu);
 
@@ -1015,29 +937,21 @@ static void smu_interrupt_work_fn(struct work_struct *work)
        struct smu_context *smu = container_of(work, struct smu_context,
                                               interrupt_work);
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
                smu->ppt_funcs->interrupt_work(smu);
-
-       mutex_unlock(&smu->mutex);
 }
 
 static int smu_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;
 
        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
-       mutex_init(&smu->smu_feature.mutex);
        bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
-       bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
 
-       mutex_init(&smu->sensor_lock);
-       mutex_init(&smu->metrics_lock);
        mutex_init(&smu->message_lock);
 
        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
@@ -1049,8 +963,6 @@ static int smu_sw_init(void *handle)
 
        atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
        atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
-       mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
-       mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
 
        smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
        smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
@@ -1101,7 +1013,7 @@ static int smu_sw_init(void *handle)
 static int smu_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;
 
        ret = smu_smc_table_sw_fini(smu);
@@ -1144,8 +1056,10 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu)
 
 static int smu_smc_hw_setup(struct smu_context *smu)
 {
+       struct smu_feature *feature = &smu->smu_feature;
        struct amdgpu_device *adev = smu->adev;
        uint32_t pcie_gen = 0, pcie_width = 0;
+       uint64_t features_supported;
        int ret = 0;
 
        if (adev->in_suspend && smu_is_dpm_running(smu)) {
@@ -1225,6 +1139,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                return ret;
        }
 
+       ret = smu_feature_get_enabled_mask(smu, &features_supported);
+       if (ret) {
+               dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+               return ret;
+       }
+       bitmap_copy(feature->supported,
+                   (unsigned long *)&features_supported,
+                   feature->feature_num);
+
        if (!smu_is_dpm_running(smu))
                dev_info(adev->dev, "dpm has been disabled\n");
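Because the per-feature `enabled` bitmap and its mutex are gone, hw setup now snapshots the firmware-reported mask once into feature->supported; later smu_feature_is_enabled() checks test this cached bitmap instead of round-tripping to the SMU. A minimal sketch of the pattern, with names matching the hunk above:

        uint64_t mask;
        DECLARE_BITMAP(supported, SMU_FEATURE_MAX);

        /* Cache the SMU-reported mask; bitmap_copy() reads the u64 as an
         * array of unsigned long, exactly as the patch stores it. */
        if (!smu_feature_get_enabled_mask(smu, &mask))
                bitmap_copy(supported, (unsigned long *)&mask,
                            SMU_FEATURE_MAX);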
 
@@ -1336,7 +1259,7 @@ static int smu_hw_init(void *handle)
 {
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
                smu->pm_enabled = false;
@@ -1352,7 +1275,7 @@ static int smu_hw_init(void *handle)
        if (smu->is_apu) {
                smu_dpm_set_vcn_enable(smu, true);
                smu_dpm_set_jpeg_enable(smu, true);
-               smu_set_gfx_cgpg(&adev->smu, true);
+               smu_set_gfx_cgpg(smu, true);
        }
 
        if (!smu->pm_enabled)
@@ -1437,9 +1360,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                case IP_VERSION(11, 5, 0):
                case IP_VERSION(11, 0, 12):
                case IP_VERSION(11, 0, 13):
-                       return smu_disable_all_features_with_exception(smu,
-                                                                      true,
-                                                                      SMU_FEATURE_COUNT);
+                       return 0;
                default:
                        break;
                }
@@ -1455,9 +1376,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                case IP_VERSION(11, 0, 0):
                case IP_VERSION(11, 0, 5):
                case IP_VERSION(11, 0, 9):
-                       return smu_disable_all_features_with_exception(smu,
-                                                                      true,
-                                                                      SMU_FEATURE_BACO_BIT);
+                       return 0;
                default:
                        break;
                }
@@ -1469,7 +1388,6 @@ static int smu_disable_dpms(struct smu_context *smu)
         */
        if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
                ret = smu_disable_all_features_with_exception(smu,
-                                                             false,
                                                              SMU_FEATURE_BACO_BIT);
                if (ret)
                        dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
@@ -1512,7 +1430,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
 static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -1531,13 +1449,19 @@ static int smu_hw_fini(void *handle)
        return smu_smc_hw_cleanup(smu);
 }
 
+static void smu_late_fini(void *handle)
+{
+       struct amdgpu_device *adev = handle;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+
+       kfree(smu);
+}
+
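With smu_context no longer embedded in amdgpu_device, its lifetime is explicit: kzalloc() in smu_early_init(), kfree() in the new late_fini hook, and every former &adev->smu user goes through the powerplay handle instead. A hedged sketch of the consumer-side pattern used throughout this patch:

        /* Fetch the heap-allocated context from the powerplay handle;
         * valid between early_init and late_fini. */
        struct smu_context *smu = adev->powerplay.pp_handle;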
 static int smu_reset(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret;
 
-       amdgpu_gfx_off_ctrl(smu->adev, false);
-
        ret = smu_hw_fini(adev);
        if (ret)
                return ret;
@@ -1550,15 +1474,13 @@ static int smu_reset(struct smu_context *smu)
        if (ret)
                return ret;
 
-       amdgpu_gfx_off_ctrl(smu->adev, true);
-
        return 0;
 }
 
 static int smu_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        int ret;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
@@ -1575,7 +1497,7 @@ static int smu_suspend(void *handle)
 
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-       smu_set_gfx_cgpg(&adev->smu, false);
+       smu_set_gfx_cgpg(smu, false);
 
        return 0;
 }
@@ -1584,7 +1506,7 @@ static int smu_resume(void *handle)
 {
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -1606,7 +1528,7 @@ static int smu_resume(void *handle)
                return ret;
        }
 
-       smu_set_gfx_cgpg(&adev->smu, true);
+       smu_set_gfx_cgpg(smu, true);
 
        smu->disable_uclk_switch = 0;
 
@@ -1630,8 +1552,6 @@ static int smu_display_configuration_change(void *handle,
        if (!display_config)
                return -EINVAL;
 
-       mutex_lock(&smu->mutex);
-
        smu_set_min_dcef_deep_sleep(smu,
                                    display_config->min_dcef_deep_sleep_set_clk / 100);
 
@@ -1640,8 +1560,6 @@ static int smu_display_configuration_change(void *handle,
                        num_of_active_display++;
        }
 
-       mutex_unlock(&smu->mutex);
-
        return 0;
 }
 
@@ -1675,14 +1593,7 @@ static int smu_enable_umd_pstate(void *handle,
                /* enter umd pstate, save current level, disable gfx cg*/
                if (*level & profile_mode_mask) {
                        smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
-                       smu_dpm_ctx->enable_umd_pstate = true;
                        smu_gpo_control(smu, false);
-                       amdgpu_device_ip_set_powergating_state(smu->adev,
-                                                              AMD_IP_BLOCK_TYPE_GFX,
-                                                              AMD_PG_STATE_UNGATE);
-                       amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                              AMD_IP_BLOCK_TYPE_GFX,
-                                                              AMD_CG_STATE_UNGATE);
                        smu_gfx_ulv_control(smu, false);
                        smu_deep_sleep_control(smu, false);
                        amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
@@ -1692,16 +1603,9 @@ static int smu_enable_umd_pstate(void *handle,
                if (!(*level & profile_mode_mask)) {
                        if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
                                *level = smu_dpm_ctx->saved_dpm_level;
-                       smu_dpm_ctx->enable_umd_pstate = false;
                        amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
                        smu_deep_sleep_control(smu, true);
                        smu_gfx_ulv_control(smu, true);
-                       amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                              AMD_IP_BLOCK_TYPE_GFX,
-                                                              AMD_CG_STATE_GATE);
-                       amdgpu_device_ip_set_powergating_state(smu->adev,
-                                                              AMD_IP_BLOCK_TYPE_GFX,
-                                                              AMD_PG_STATE_GATE);
                        smu_gpo_control(smu, true);
                }
        }
@@ -1778,22 +1682,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 
 static int smu_handle_task(struct smu_context *smu,
                           enum amd_dpm_forced_level level,
-                          enum amd_pp_task task_id,
-                          bool lock_needed)
+                          enum amd_pp_task task_id)
 {
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       if (lock_needed)
-               mutex_lock(&smu->mutex);
-
        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
                ret = smu_pre_display_config_changed(smu);
                if (ret)
-                       goto out;
+                       return ret;
                ret = smu_adjust_power_state_dynamic(smu, level, false);
                break;
        case AMD_PP_TASK_COMPLETE_INIT:
@@ -1804,10 +1704,6 @@ static int smu_handle_task(struct smu_context *smu,
                break;
        }
 
-out:
-       if (lock_needed)
-               mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -1818,7 +1714,7 @@ static int smu_handle_dpm_task(void *handle,
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
-       return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
+       return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
 
 }
 
@@ -1837,8 +1733,6 @@ static int smu_switch_power_profile(void *handle,
        if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
                return -EINVAL;
 
-       mutex_lock(&smu->mutex);
-
        if (!en) {
                smu->workload_mask &= ~(1 << smu->workload_prority[type]);
                index = fls(smu->workload_mask);
@@ -1855,8 +1749,6 @@ static int smu_switch_power_profile(void *handle,
                smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
                smu_bump_power_profile_mode(smu, &workload, 0);
 
-       mutex_unlock(&smu->mutex);
-
        return 0;
 }
 
@@ -1864,7 +1756,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-       enum amd_dpm_forced_level level;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
@@ -1872,11 +1763,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
-       mutex_lock(&(smu->mutex));
-       level = smu_dpm_ctx->dpm_level;
-       mutex_unlock(&(smu->mutex));
-
-       return level;
+       return smu_dpm_ctx->dpm_level;
 }
 
 static int smu_force_performance_level(void *handle,
@@ -1892,19 +1779,12 @@ static int smu_force_performance_level(void *handle,
        if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
                return -EINVAL;
 
-       mutex_lock(&smu->mutex);
-
        ret = smu_enable_umd_pstate(smu, &level);
-       if (ret) {
-               mutex_unlock(&smu->mutex);
+       if (ret)
                return ret;
-       }
 
        ret = smu_handle_task(smu, level,
-                             AMD_PP_TASK_READJUST_POWER_STATE,
-                             false);
-
-       mutex_unlock(&smu->mutex);
+                             AMD_PP_TASK_READJUST_POWER_STATE);
 
        /* reset user dpm clock state */
        if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1918,16 +1798,11 @@ static int smu_force_performance_level(void *handle,
 static int smu_set_display_count(void *handle, uint32_t count)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-       ret = smu_init_display_count(smu, count);
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu_init_display_count(smu, count);
 }
 
 static int smu_force_smuclk_levels(struct smu_context *smu,
@@ -1945,8 +1820,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
                return -EINVAL;
        }
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
                ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
                if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
@@ -1955,8 +1828,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
                }
        }
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2015,14 +1886,10 @@ static int smu_set_mp1_state(void *handle,
        if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs &&
            smu->ppt_funcs->set_mp1_state)
                ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2038,14 +1905,10 @@ static int smu_set_df_cstate(void *handle,
        if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
                return 0;
 
-       mutex_lock(&smu->mutex);
-
        ret = smu->ppt_funcs->set_df_cstate(smu, state);
        if (ret)
                dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2059,38 +1922,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
        if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
                return 0;
 
-       mutex_lock(&smu->mutex);
-
        ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
        if (ret)
                dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
 int smu_write_watermarks_table(struct smu_context *smu)
 {
-       int ret = 0;
-
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
-       ret = smu_set_watermarks_table(smu, NULL);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu_set_watermarks_table(smu, NULL);
 }
 
 static int smu_set_watermarks_for_clock_ranges(void *handle,
                                               struct pp_smu_wm_range_sets *clock_ranges)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
@@ -2098,13 +1948,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
        if (smu->disable_watermark)
                return 0;
 
-       mutex_lock(&smu->mutex);
-
-       ret = smu_set_watermarks_table(smu, clock_ranges);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu_set_watermarks_table(smu, clock_ranges);
 }
 
 int smu_set_ac_dc(struct smu_context *smu)
@@ -2118,14 +1962,12 @@ int smu_set_ac_dc(struct smu_context *smu)
        if (smu->dc_controlled_by_gpio)
                return 0;
 
-       mutex_lock(&smu->mutex);
        ret = smu_set_power_source(smu,
                                   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
                                   SMU_POWER_SOURCE_DC);
        if (ret)
                dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
                       smu->adev->pm.ac_power ? "AC" : "DC");
-       mutex_unlock(&smu->mutex);
 
        return ret;
 }
@@ -2138,6 +1980,7 @@ const struct amd_ip_funcs smu_ip_funcs = {
        .sw_fini = smu_sw_fini,
        .hw_init = smu_hw_init,
        .hw_fini = smu_hw_fini,
+       .late_fini = smu_late_fini,
        .suspend = smu_suspend,
        .resume = smu_resume,
        .is_idle = NULL,
@@ -2146,7 +1989,6 @@ const struct amd_ip_funcs smu_ip_funcs = {
        .soft_reset = NULL,
        .set_clockgating_state = smu_set_clockgating_state,
        .set_powergating_state = smu_set_powergating_state,
-       .enable_umd_pstate = smu_enable_umd_pstate,
 };
 
 const struct amdgpu_ip_block_version smu_v11_0_ip_block =
@@ -2212,13 +2054,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 {
        int ret = 0;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->set_gfx_cgpg)
                ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2230,21 +2068,21 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (!smu->ppt_funcs->set_fan_speed_rpm)
+               return -EOPNOTSUPP;
 
-       if (smu->ppt_funcs->set_fan_speed_rpm) {
-               ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
-               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
-                       smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
-                       smu->user_dpm_profile.fan_speed_rpm = speed;
+       if (speed == U32_MAX)
+               return -EINVAL;
 
-                       /* Override custom PWM setting as they cannot co-exist */
-                       smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
-                       smu->user_dpm_profile.fan_speed_pwm = 0;
-               }
-       }
+       ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+       if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+               smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
+               smu->user_dpm_profile.fan_speed_rpm = speed;
 
-       mutex_unlock(&smu->mutex);
+               /* Override custom PWM setting as they cannot co-exist */
+               smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
+               smu->user_dpm_profile.fan_speed_pwm = 0;
+       }
 
        return ret;
 }
@@ -2301,8 +2139,6 @@ int smu_get_power_limit(void *handle,
                break;
        }
 
-       mutex_lock(&smu->mutex);
-
        if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
                if (smu->ppt_funcs->get_ppt_limit)
                        ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
@@ -2336,8 +2172,6 @@ int smu_get_power_limit(void *handle,
                }
        }
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2350,21 +2184,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        limit &= (1<<24)-1;
        if (limit_type != SMU_DEFAULT_PPT_LIMIT)
-               if (smu->ppt_funcs->set_power_limit) {
-                       ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
-                       goto out;
-               }
+               if (smu->ppt_funcs->set_power_limit)
+                       return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
 
        if (limit > smu->max_power_limit) {
                dev_err(smu->adev->dev,
                        "New power limit (%d) is over the max allowed %d\n",
                        limit, smu->max_power_limit);
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        if (!limit)
@@ -2376,9 +2205,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
                        smu->user_dpm_profile.power_limit = limit;
        }
 
-out:
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2389,21 +2215,14 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->print_clk_levels)
                ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
-static int smu_print_ppclk_levels(void *handle,
-                                 enum pp_clock_type type,
-                                 char *buf)
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
 {
-       struct smu_context *smu = handle;
        enum smu_clk_type clk_type;
 
        switch (type) {
@@ -2436,12 +2255,45 @@ static int smu_print_ppclk_levels(void *handle,
        case OD_CCLK:
                clk_type = SMU_OD_CCLK; break;
        default:
-               return -EINVAL;
+               clk_type = SMU_CLK_COUNT; break;
        }
 
+       return clk_type;
+}
+
+static int smu_print_ppclk_levels(void *handle,
+                                 enum pp_clock_type type,
+                                 char *buf)
+{
+       struct smu_context *smu = handle;
+       enum smu_clk_type clk_type;
+
+       clk_type = smu_convert_to_smuclk(type);
+       if (clk_type == SMU_CLK_COUNT)
+               return -EINVAL;
+
        return smu_print_smuclk_levels(smu, clk_type, buf);
 }
 
+static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
+{
+       struct smu_context *smu = handle;
+       enum smu_clk_type clk_type;
+
+       clk_type = smu_convert_to_smuclk(type);
+       if (clk_type == SMU_CLK_COUNT)
+               return -EINVAL;
+
+       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+               return -EOPNOTSUPP;
+
+       if (!smu->ppt_funcs->emit_clk_levels)
+               return -ENOENT;
+
+       return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
+}
+
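emit_clk_levels is an append-style alternative to print_clk_levels: instead of each backend formatting from offset zero, callers pass a running offset so several clock domains can be emitted into one sysfs page. A hedged sketch of what a ppt backend implementation could look like (stub values only; a real backend would walk its DPM tables):

        /* Assumed example backend, not from this patch. sysfs_emit_at()
         * (<linux/sysfs.h>) appends at *offset so earlier output in the
         * same page survives. */
        static int example_emit_clk_levels(struct smu_context *smu,
                                           enum smu_clk_type clk_type,
                                           char *buf, int *offset)
        {
                if (clk_type != SMU_SCLK)
                        return -EOPNOTSUPP;

                *offset += sysfs_emit_at(buf, *offset, "0: %uMhz *\n", 800);

                return 0;
        }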
 static int smu_od_edit_dpm_table(void *handle,
                                 enum PP_OD_DPM_TABLE_COMMAND type,
                                 long *input, uint32_t size)
@@ -2452,14 +2304,10 @@ static int smu_od_edit_dpm_table(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->od_edit_dpm_table) {
                ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
        }
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2483,8 +2331,6 @@ static int smu_read_sensor(void *handle,
        size_val = *size_arg;
        size = &size_val;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->read_sensor)
                if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
                        goto unlock;
@@ -2499,7 +2345,7 @@ static int smu_read_sensor(void *handle,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
-               ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+               ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
                *size = 8;
                break;
        case AMDGPU_PP_SENSOR_UVD_POWER:
@@ -2525,8 +2371,6 @@ static int smu_read_sensor(void *handle,
        }
 
 unlock:
-       mutex_unlock(&smu->mutex);
-
        // assign uint32_t to int
        *size_arg = size_val;
 
@@ -2536,7 +2380,6 @@ unlock:
 static int smu_get_power_profile_mode(void *handle, char *buf)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
            !smu->ppt_funcs->get_power_profile_mode)
@@ -2544,13 +2387,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
        if (!buf)
                return -EINVAL;
 
-       mutex_lock(&smu->mutex);
-
-       ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu->ppt_funcs->get_power_profile_mode(smu, buf);
 }
 
 static int smu_set_power_profile_mode(void *handle,
@@ -2558,76 +2395,66 @@ static int smu_set_power_profile_mode(void *handle,
                                      uint32_t param_size)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
            !smu->ppt_funcs->set_power_profile_mode)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
-       smu_bump_power_profile_mode(smu, param, param_size);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu_bump_power_profile_mode(smu, param, param_size);
 }
 
 
-static u32 smu_get_fan_control_mode(void *handle)
+static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
 {
        struct smu_context *smu = handle;
-       u32 ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return AMD_FAN_CTRL_NONE;
+               return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (!smu->ppt_funcs->get_fan_control_mode)
+               return -EOPNOTSUPP;
 
-       if (smu->ppt_funcs->get_fan_control_mode)
-               ret = smu->ppt_funcs->get_fan_control_mode(smu);
+       if (!fan_mode)
+               return -EINVAL;
 
-       mutex_unlock(&smu->mutex);
+       *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
 
-       return ret;
+       return 0;
 }
 
-static int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(void *handle, u32 value)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return  -EOPNOTSUPP;
+               return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (!smu->ppt_funcs->set_fan_control_mode)
+               return -EOPNOTSUPP;
 
-       if (smu->ppt_funcs->set_fan_control_mode) {
-               ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
-               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
-                       smu->user_dpm_profile.fan_mode = value;
-       }
+       if (value == U32_MAX)
+               return -EINVAL;
 
-       mutex_unlock(&smu->mutex);
+       ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+       if (ret)
+               goto out;
 
-       /* reset user dpm fan speed */
-       if (!ret && value != AMD_FAN_CTRL_MANUAL &&
-                       !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
-               smu->user_dpm_profile.fan_speed_pwm = 0;
-               smu->user_dpm_profile.fan_speed_rpm = 0;
-               smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+       if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+               smu->user_dpm_profile.fan_mode = value;
+
+               /* reset user dpm fan speed */
+               if (value != AMD_FAN_CTRL_MANUAL) {
+                       smu->user_dpm_profile.fan_speed_pwm = 0;
+                       smu->user_dpm_profile.fan_speed_rpm = 0;
+                       smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+               }
        }
 
+out:
        return ret;
 }
 
-static void smu_pp_set_fan_control_mode(void *handle, u32 value)
-{
-       struct smu_context *smu = handle;
-
-       smu_set_fan_control_mode(smu, value);
-}
-
-
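With the wrapper above gone, both fan-mode accessors now follow the kernel's int-returning convention, with an out-parameter for the getter. A hedged sketch of the new calling pattern (the helper name is illustrative; AMD_FAN_CTRL_* come from the existing powerplay headers):

/* Sketch: read the current fan mode, then force automatic control.
 * Both calls return -EOPNOTSUPP when the ASIC lacks the hooks. */
static int example_force_auto_fan(void *handle)
{
	u32 mode;
	int ret;

	ret = smu_get_fan_control_mode(handle, &mode);
	if (ret)
		return ret;

	if (mode != AMD_FAN_CTRL_AUTO)
		ret = smu_set_fan_control_mode(handle, AMD_FAN_CTRL_AUTO);

	return ret;
}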
 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
 {
        struct smu_context *smu = handle;
@@ -2636,12 +2463,13 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (!smu->ppt_funcs->get_fan_speed_pwm)
+               return -EOPNOTSUPP;
 
-       if (smu->ppt_funcs->get_fan_speed_pwm)
-               ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
+       if (!speed)
+               return -EINVAL;
 
-       mutex_unlock(&smu->mutex);
+       ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
 
        return ret;
 }
@@ -2654,21 +2482,21 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (!smu->ppt_funcs->set_fan_speed_pwm)
+               return -EOPNOTSUPP;
 
-       if (smu->ppt_funcs->set_fan_speed_pwm) {
-               ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
-               if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
-                       smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
-                       smu->user_dpm_profile.fan_speed_pwm = speed;
+       if (speed == U32_MAX)
+               return -EINVAL;
 
-                       /* Override custom RPM setting as they cannot co-exist */
-                       smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
-                       smu->user_dpm_profile.fan_speed_rpm = 0;
-               }
-       }
+       ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
+       if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+               smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
+               smu->user_dpm_profile.fan_speed_pwm = speed;
 
-       mutex_unlock(&smu->mutex);
+               /* Override custom RPM setting as they cannot co-exist */
+               smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
+               smu->user_dpm_profile.fan_speed_rpm = 0;
+       }
 
        return ret;
 }
@@ -2681,12 +2509,13 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (!smu->ppt_funcs->get_fan_speed_rpm)
+               return -EOPNOTSUPP;
 
-       if (smu->ppt_funcs->get_fan_speed_rpm)
-               ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+       if (!speed)
+               return -EINVAL;
 
-       mutex_unlock(&smu->mutex);
+       ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
 
        return ret;
 }
@@ -2694,18 +2523,11 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
-       ret = smu_set_min_dcef_deep_sleep(smu, clk);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return smu_set_min_dcef_deep_sleep(smu, clk);
 }
 
 static int smu_get_clock_by_type_with_latency(void *handle,
@@ -2719,8 +2541,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->get_clock_by_type_with_latency) {
                switch (type) {
                case amd_pp_sys_clock:
@@ -2737,15 +2557,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
                        break;
                default:
                        dev_err(smu->adev->dev, "Invalid clock type!\n");
-                       mutex_unlock(&smu->mutex);
                        return -EINVAL;
                }
 
                ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
        }
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2758,13 +2575,9 @@ static int smu_display_clock_voltage_request(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->display_clock_voltage_request)
                ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2778,13 +2591,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->display_disable_memory_clock_switch)
                ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2797,13 +2606,9 @@ static int smu_set_xgmi_pstate(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->set_xgmi_pstate)
                ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
 
-       mutex_unlock(&smu->mutex);
-
        if (ret)
                dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
 
@@ -2813,21 +2618,16 @@ static int smu_set_xgmi_pstate(void *handle,
 static int smu_get_baco_capability(void *handle, bool *cap)
 {
        struct smu_context *smu = handle;
-       int ret = 0;
 
        *cap = false;
 
        if (!smu->pm_enabled)
                return 0;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
                *cap = smu->ppt_funcs->baco_is_support(smu);
 
-       mutex_unlock(&smu->mutex);
-
-       return ret;
+       return 0;
 }
 
 static int smu_baco_set_state(void *handle, int state)
@@ -2839,20 +2639,11 @@ static int smu_baco_set_state(void *handle, int state)
                return -EOPNOTSUPP;
 
        if (state == 0) {
-               mutex_lock(&smu->mutex);
-
                if (smu->ppt_funcs->baco_exit)
                        ret = smu->ppt_funcs->baco_exit(smu);
-
-               mutex_unlock(&smu->mutex);
        } else if (state == 1) {
-               mutex_lock(&smu->mutex);
-
                if (smu->ppt_funcs->baco_enter)
                        ret = smu->ppt_funcs->baco_enter(smu);
-
-               mutex_unlock(&smu->mutex);
-
        } else {
                return -EINVAL;
        }
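smu_baco_set_state() keeps the amd_pm_funcs convention of 0 for exit and 1 for enter; only the per-call locking is gone. An illustrative sequence pairing it with the capability query above (helper name assumed):

/* Sketch: enter BACO only when the platform reports support.
 * smu_get_baco_capability() returns 0 and reports support via *cap. */
static int example_enter_baco(void *handle)
{
	bool cap = false;

	smu_get_baco_capability(handle, &cap);
	if (!cap)
		return -EOPNOTSUPP;

	return smu_baco_set_state(handle, 1);	/* 1 == enter, 0 == exit */
}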
@@ -2871,13 +2662,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
        if (!smu->pm_enabled)
                return false;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
                ret = smu->ppt_funcs->mode1_reset_is_support(smu);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2888,13 +2675,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
        if (!smu->pm_enabled)
                return false;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
                ret = smu->ppt_funcs->mode2_reset_is_support(smu);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2905,13 +2688,9 @@ int smu_mode1_reset(struct smu_context *smu)
        if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->mode1_reset)
                ret = smu->ppt_funcs->mode1_reset(smu);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2923,13 +2702,9 @@ static int smu_mode2_reset(void *handle)
        if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->mode2_reset)
                ret = smu->ppt_funcs->mode2_reset(smu);
 
-       mutex_unlock(&smu->mutex);
-
        if (ret)
                dev_err(smu->adev->dev, "Mode2 reset failed!\n");
 
@@ -2945,13 +2720,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
                ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2965,13 +2736,9 @@ static int smu_get_uclk_dpm_states(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->get_uclk_dpm_states)
                ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -2983,13 +2750,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->get_current_power_state)
                pm_state = smu->ppt_funcs->get_current_power_state(smu);
 
-       mutex_unlock(&smu->mutex);
-
        return pm_state;
 }
 
@@ -3002,20 +2765,15 @@ static int smu_get_dpm_clock_table(void *handle,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->get_dpm_clock_table)
                ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
        struct smu_context *smu = handle;
-       ssize_t size;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
@@ -3023,13 +2781,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
        if (!smu->ppt_funcs->get_gpu_metrics)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
-       size = smu->ppt_funcs->get_gpu_metrics(smu, table);
-
-       mutex_unlock(&smu->mutex);
-
-       return size;
+       return smu->ppt_funcs->get_gpu_metrics(smu, table);
 }
 
 static int smu_enable_mgpu_fan_boost(void *handle)
@@ -3040,13 +2792,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
-
        if (smu->ppt_funcs->enable_mgpu_fan_boost)
                ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
 
-       mutex_unlock(&smu->mutex);
-
        return ret;
 }
 
@@ -3056,10 +2804,8 @@ static int smu_gfx_state_change_set(void *handle,
        struct smu_context *smu = handle;
        int ret = 0;
 
-       mutex_lock(&smu->mutex);
        if (smu->ppt_funcs->gfx_state_change_set)
                ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
-       mutex_unlock(&smu->mutex);
 
        return ret;
 }
@@ -3068,10 +2814,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
 {
        int ret = 0;
 
-       mutex_lock(&smu->mutex);
        if (smu->ppt_funcs->smu_handle_passthrough_sbr)
                ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
-       mutex_unlock(&smu->mutex);
 
        return ret;
 }
@@ -3080,11 +2824,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
 {
        int ret = -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
        if (smu->ppt_funcs &&
                smu->ppt_funcs->get_ecc_info)
                ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
-       mutex_unlock(&smu->mutex);
 
        return ret;
 
@@ -3101,24 +2843,23 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
 
        *addr = NULL;
        *size = 0;
-       mutex_lock(&smu->mutex);
        if (memory_pool->bo) {
                *addr = memory_pool->cpu_addr;
                *size = memory_pool->size;
        }
-       mutex_unlock(&smu->mutex);
 
        return 0;
 }
 
 static const struct amd_pm_funcs swsmu_pm_funcs = {
        /* export for sysfs */
-       .set_fan_control_mode    = smu_pp_set_fan_control_mode,
+       .set_fan_control_mode    = smu_set_fan_control_mode,
        .get_fan_control_mode    = smu_get_fan_control_mode,
        .set_fan_speed_pwm   = smu_set_fan_speed_pwm,
        .get_fan_speed_pwm   = smu_get_fan_speed_pwm,
        .force_clock_level       = smu_force_ppclk_levels,
        .print_clock_levels      = smu_print_ppclk_levels,
+       .emit_clock_levels       = smu_emit_ppclk_levels,
        .force_performance_level = smu_force_performance_level,
        .read_sensor             = smu_read_sensor,
        .get_performance_level   = smu_get_performance_level,
@@ -3165,17 +2906,13 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
        .get_smu_prv_buf_details = smu_get_prv_buffer_details,
 };
 
-int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
                       uint64_t event_arg)
 {
        int ret = -EINVAL;
-       struct smu_context *smu = &adev->smu;
 
-       if (smu->ppt_funcs->wait_for_event) {
-               mutex_lock(&smu->mutex);
+       if (smu->ppt_funcs->wait_for_event)
                ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
-               mutex_unlock(&smu->mutex);
-       }
 
        return ret;
 }
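smu_wait_for_event() now takes the smu_context directly, matching the header change further down. A hedged example of the updated call site (assumes SMU_EVENT_RESET_COMPLETE stays visible from its new home once the enum moves out of amdgpu_smu.h below):

/* Sketch: the caller resolves the context once instead of passing adev. */
static int example_wait_reset_done(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	return smu_wait_for_event(smu, SMU_EVENT_RESET_COMPLETE, 0);
}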
@@ -3203,7 +2940,7 @@ int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
 {
        struct amdgpu_device *adev = filp->f_inode->i_private;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        unsigned char *buf;
        int r;
 
@@ -3228,7 +2965,7 @@ static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t
                                loff_t *pos)
 {
        struct amdgpu_device *adev = filp->f_inode->i_private;
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
 
        if (!filp->private_data)
@@ -3269,7 +3006,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
 
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
 
        if (!smu->stb_context.stb_buf_size)
                return;
@@ -3281,5 +3018,14 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
                            &smu_stb_debugfs_fops,
                            smu->stb_context.stb_buf_size);
 #endif
+}
+
+int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
+{
+       int ret = 0;
 
+       if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
+               ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
+
+       return ret;
 }
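The new smu_send_hbm_bad_pages_num() mirrors the other thin dispatchers in this file, so RAS code can report retired pages without touching ppt_funcs directly. Illustrative call (the count is assumed to come from the RAS bad-page records; ASICs without the callback simply get 0 back):

/* Sketch: forward the HBM bad-page count after RAS EEPROM recovery. */
static int example_report_bad_pages(struct smu_context *smu,
				    uint32_t bad_page_cnt)
{
	return smu_send_hbm_bad_pages_num(smu, bad_page_cnt);
}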
similarity index 97%
rename from drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
rename to drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index ba7565bc8104904f4d21ef2c82ed4ebd735b9508..17594ceb507e2a6c26ee26b965d12d6471910314 100644 (file)
@@ -241,11 +241,6 @@ struct smu_user_dpm_profile {
        uint32_t clk_dependency;
 };
 
-enum smu_event_type {
-
-       SMU_EVENT_RESET_COMPLETE = 0,
-};
-
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)      \
        do {                                            \
                tables[table_id].size = s;              \
@@ -342,6 +337,7 @@ struct smu_table_context
        struct smu_bios_boot_up_values  boot_values;
        void                            *driver_pptable;
        void                            *ecc_table;
+       void                            *driver_smu_config_table;
        struct smu_table                tables[SMU_TABLE_COUNT];
        /*
         * The driver table is just a staging buffer for
@@ -368,7 +364,6 @@ struct smu_dpm_context {
        uint32_t dpm_context_size;
        void *dpm_context;
        void *golden_dpm_context;
-       bool enable_umd_pstate;
        enum amd_dpm_forced_level dpm_level;
        enum amd_dpm_forced_level saved_dpm_level;
        enum amd_dpm_forced_level requested_dpm_level;
@@ -382,8 +377,6 @@ struct smu_power_gate {
        bool vce_gated;
        atomic_t vcn_gated;
        atomic_t jpeg_gated;
-       struct mutex vcn_gate_lock;
-       struct mutex jpeg_gate_lock;
 };
 
 struct smu_power_context {
@@ -398,8 +391,6 @@ struct smu_feature
        uint32_t feature_num;
        DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
        DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
-       DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
-       struct mutex mutex;
 };
 
 struct smu_clocks {
@@ -436,7 +427,6 @@ enum smu_baco_state
 
 struct smu_baco_context
 {
-       struct mutex mutex;
        uint32_t state;
        bool platform_support;
 };
@@ -494,9 +484,6 @@ struct smu_context
        const struct cmn2asic_mapping   *table_map;
        const struct cmn2asic_mapping   *pwr_src_map;
        const struct cmn2asic_mapping   *workload_map;
-       struct mutex                    mutex;
-       struct mutex                    sensor_lock;
-       struct mutex                    metrics_lock;
        struct mutex                    message_lock;
        uint64_t pool_size;
 
@@ -618,9 +605,23 @@ struct pptable_funcs {
         *                    to buffer. Star current level.
         *
         * Used for sysfs interfaces.
+        * Return: Number of characters written to the buffer
         */
        int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
 
+       /**
+        * @emit_clk_levels: Print DPM clock levels for a clock domain
+        *                    to buffer using sysfs_emit_at. Star current level.
+        *
+        * Used for sysfs interfaces.
+        * &buf: sysfs buffer
+        * &offset: offset within buffer to start printing, which is updated by the
+        * function.
+        *
+        * Return: 0 on success or a negative error code.
+        */
+       int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
+
        /**
         * @force_clk_levels: Set a range of allowed DPM levels for a clock
         *                    domain.
@@ -829,12 +830,12 @@ struct pptable_funcs {
         * other devices. The i2c's EEPROM also stores bad page tables on boards
         * with ECC.
         */
-       int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+       int (*i2c_init)(struct smu_context *smu);
 
        /**
         * @i2c_fini: Tear down i2c.
         */
-       void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+       void (*i2c_fini)(struct smu_context *smu);
 
        /**
         * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
@@ -988,10 +989,9 @@ struct pptable_funcs {
        /**
         * @get_enabled_mask: Get a mask of features that are currently enabled
         *                    on the SMU.
-        * &feature_mask: Array representing enabled feature mask.
-        * &num: Elements in &feature_mask.
+        * &feature_mask: Enabled feature mask.
         */
-       int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+       int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask);
 
        /**
         * @feature_is_enabled: Test if a feature is enabled.
@@ -1005,7 +1005,6 @@ struct pptable_funcs {
         *                                       exception to those in &mask.
         */
        int (*disable_all_features_with_exception)(struct smu_context *smu,
-                                                  bool no_hw_disablement,
                                                   enum smu_feature_mask mask);
 
        /**
@@ -1395,10 +1394,6 @@ int smu_mode1_reset(struct smu_context *smu);
 
 extern const struct amd_ip_funcs smu_ip_funcs;
 
-extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
-
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_cclk_dpm(struct amdgpu_device *adev);
 int smu_write_watermarks_table(struct smu_context *smu);
@@ -1413,15 +1408,15 @@ int smu_set_ac_dc(struct smu_context *smu);
 
 int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
-int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
 
 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
 
-int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
                       uint64_t event_arg);
 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
 int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
-
+int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
 #endif
 #endif
similarity index 99%
rename from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
rename to drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 63b8701fd4668a307190c1fed51dcfba5359af14..b253be602cc2a59ed5dac8682bb0fa12dab84b92 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x3B
+#define SMU11_DRIVER_IF_VERSION 0x40
 
 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 7
 
@@ -172,6 +172,7 @@ typedef enum {
 #define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER         0x00001000
 #define DPM_OVERRIDE_DISABLE_VCN_PG                  0x00002000
 #define DPM_OVERRIDE_DISABLE_FMAX_VMAX               0x00004000
+#define DPM_OVERRIDE_ENABLE_eGPU_USB_WA              0x00008000
 
 // VR Mapping Bit Defines
 #define VR_MAPPING_VR_SELECT_MASK  0x01
@@ -263,7 +264,22 @@ typedef enum {
 #define LED_DISPLAY_ERROR_BIT              2
 
 //RLC Pace Table total number of levels
-#define RLC_PACE_TABLE_NUM_LEVELS 16
+#define RLC_PACE_TABLE_NUM_LEVELS          16
+#define SIENNA_CICHLID_UMC_CHANNEL_NUM     16
+
+typedef struct {
+  uint64_t mca_umc_status;
+  uint64_t mca_umc_addr;
+
+  uint16_t ce_count_lo_chip;
+  uint16_t ce_count_hi_chip;
+
+  uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+  EccInfo_t  EccInfo[SIENNA_CICHLID_UMC_CHANNEL_NUM];
+} EccInfoTable_t;
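EccInfoTable_t carries one EccInfo_t record per UMC channel. A hedged sketch of walking a fetched copy (the fetch itself would go through the TABLE_ECCINFO id added below; the helper name is illustrative):

/* Sketch: sum correctable-error counters across all 16 channels. */
static uint32_t example_total_ce_count(const EccInfoTable_t *table)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < SIENNA_CICHLID_UMC_CHANNEL_NUM; i++)
		total += table->EccInfo[i].ce_count_lo_chip +
			 table->EccInfo[i].ce_count_hi_chip;

	return total;
}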
 
 typedef enum {
   DRAM_BIT_WIDTH_DISABLED = 0,
@@ -283,6 +299,7 @@ typedef enum {
 
 #define MAX_SW_I2C_COMMANDS                24
 
+
 typedef enum {
   I2C_CONTROLLER_PORT_0 = 0,  //CKSVII2C0
   I2C_CONTROLLER_PORT_1 = 1,  //CKSVII2C1
@@ -1672,7 +1689,8 @@ typedef struct {
 #define TABLE_OVERDRIVE               8
 #define TABLE_I2C_COMMANDS            9
 #define TABLE_PACE                   10
-#define TABLE_COUNT                  11
+#define TABLE_ECCINFO                11
+#define TABLE_COUNT                  12
 
 typedef struct {
   float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS];
index 505d2fb94fd936052937b6e45cb938bce2791b9d..201563072189d688a8e8273a54d59952c6a99542 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <linux/firmware.h>
 #include "amdgpu.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
@@ -33,7 +34,6 @@
 #include "smu11_driver_if_arcturus.h"
 #include "soc15_common.h"
 #include "atom.h"
-#include "power_state.h"
 #include "arcturus_ppt.h"
 #include "smu_v11_0_pptable.h"
 #include "arcturus_ppsmc.h"
@@ -57,8 +57,6 @@
 #undef pr_info
 #undef pr_debug
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
 #define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
        [smu_feature] = {1, (arcturus_feature)}
 
@@ -603,15 +601,11 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -694,8 +688,6 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -1120,7 +1112,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1181,7 +1172,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
@@ -2031,15 +2021,12 @@ static void arcturus_dump_pptable(struct smu_context *smu)
 static bool arcturus_is_dpm_running(struct smu_context *smu)
 {
        int ret = 0;
-       uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
-       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
        if (ret)
                return false;
 
-       feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
@@ -2071,18 +2058,23 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
                             struct i2c_msg *msg, int num_msgs)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
-       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+       struct amdgpu_device *adev = smu_i2c->adev;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = &smu_table->driver_table;
        SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
        int i, j, r, c;
        u16 dir;
 
+       if (!adev->pm.dpm_enabled)
+               return -EBUSY;
+
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
 
-       req->I2CcontrollerPort = 0;
+       req->I2CcontrollerPort = smu_i2c->port;
        req->I2CSpeed = I2C_SPEED_FAST_400K;
        req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
        dir = msg[0].flags & I2C_M_RD;
@@ -2118,9 +2110,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
                        }
                }
        }
-       mutex_lock(&adev->smu.mutex);
-       r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->smu.mutex);
+       mutex_lock(&adev->pm.mutex);
+       r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -2161,28 +2153,60 @@ static const struct i2c_adapter_quirks arcturus_i2c_control_quirks = {
        .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
 };
 
-static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int arcturus_i2c_control_init(struct smu_context *smu)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-       int res;
+       struct amdgpu_device *adev = smu->adev;
+       int res, i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               smu_i2c->adev = adev;
+               smu_i2c->port = i;
+               mutex_init(&smu_i2c->mutex);
+               control->owner = THIS_MODULE;
+               control->class = I2C_CLASS_HWMON;
+               control->dev.parent = &adev->pdev->dev;
+               control->algo = &arcturus_i2c_algo;
+               control->quirks = &arcturus_i2c_control_quirks;
+               snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+               i2c_set_adapdata(control, smu_i2c);
+
+               res = i2c_add_adapter(control);
+               if (res) {
+                       DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+                       goto Out_err;
+               }
+       }
 
-       control->owner = THIS_MODULE;
-       control->class = I2C_CLASS_HWMON;
-       control->dev.parent = &adev->pdev->dev;
-       control->algo = &arcturus_i2c_algo;
-       control->quirks = &arcturus_i2c_control_quirks;
-       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+       adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+       adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
 
-       res = i2c_add_adapter(control);
-       if (res)
-               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+       return 0;
+Out_err:
+       for ( ; i >= 0; i--) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
 
+               i2c_del_adapter(control);
+       }
        return res;
 }
 
-static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void arcturus_i2c_control_fini(struct smu_context *smu)
 {
-       i2c_del_adapter(control);
+       struct amdgpu_device *adev = smu->adev;
+       int i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               i2c_del_adapter(control);
+       }
+       adev->pm.ras_eeprom_i2c_bus = NULL;
+       adev->pm.fru_eeprom_i2c_bus = NULL;
 }
 
 static void arcturus_get_unique_id(struct smu_context *smu)
index 2238ee19c2226a5fa80cbf8ea7ba769132094d23..b3a0f3fb3e65754276e95df5b9ff7859f9418a66 100644 (file)
@@ -125,22 +125,6 @@ static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
        return smu_v11_0_init_smc_tables(smu);
 }
 
-static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-
-       kfree(smu_table->metrics_table);
-       smu_table->metrics_table = NULL;
-
-       kfree(smu_table->gpu_metrics_table);
-       smu_table->gpu_metrics_table = NULL;
-       smu_table->gpu_metrics_table_size = 0;
-
-       smu_table->metrics_time = 0;
-
-       return 0;
-}
-
 static int
 cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
                                        MetricsMember_t member,
@@ -150,13 +134,9 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu, NULL, false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -200,8 +180,6 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -215,8 +193,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
-
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
                ret = cyan_skillfish_get_smu_metrics_data(smu,
@@ -267,8 +243,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->sensor_lock);
-
        return ret;
 }
 
@@ -376,20 +350,16 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
-       uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
        /* we need to re-init after suspend so return false */
        if (adev->in_suspend)
                return false;
 
-       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
        if (ret)
                return false;
 
-       feature_enabled = (uint64_t)feature_mask[0] |
-                               ((uint64_t)feature_mask[1] << 32);
-
        /*
         * cyan_skillfish specific: query the default sclk instead of hardcoding it.
         */
@@ -552,6 +522,36 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
        return ret;
 }
 
+static int cyan_skillfish_get_dpm_ultimate_freq(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t *min,
+                                               uint32_t *max)
+{
+       int ret = 0;
+       uint32_t low, high;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+               low = CYAN_SKILLFISH_SCLK_MIN;
+               high = CYAN_SKILLFISH_SCLK_MAX;
+               break;
+       default:
+               ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &low);
+               if (ret)
+                       return ret;
+               high = low;
+               break;
+       }
+
+       if (min)
+               *min = low;
+       if (max)
+               *max = high;
+
+       return 0;
+}
+
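The new callback reports the fixed SCLK window and falls back to the current frequency for other domains; either bound may be NULL. Example call (wrapper name illustrative):

/* Sketch: query only the maximum supported SCLK. */
static int example_query_sclk_max(struct smu_context *smu, uint32_t *max_sclk)
{
	return cyan_skillfish_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, max_sclk);
}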
 static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
 
        .check_fw_status = smu_v11_0_check_fw_status,
@@ -559,12 +559,14 @@ static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
        .init_smc_tables = cyan_skillfish_init_smc_tables,
-       .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+       .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .read_sensor = cyan_skillfish_read_sensor,
        .print_clk_levels = cyan_skillfish_print_clk_levels,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
        .is_dpm_running = cyan_skillfish_is_dpm_running,
        .get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
        .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
+       .get_dpm_ultimate_freq = cyan_skillfish_get_dpm_ultimate_freq,
        .register_irq_handler = smu_v11_0_register_irq_handler,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
index 2bb7816b245aad6366d20f4cb19aaed05192808d..66f9276c44993fb180b5aeb71fa46069ae7420e7 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/i2c.h>
 #include "amdgpu.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
@@ -57,8 +58,6 @@
 #undef pr_info
 #undef pr_debug
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -511,6 +510,8 @@ static int navi10_tables_init(struct smu_context *smu)
        SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
                       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
                       AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
+                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
        smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
                                           GFP_KERNEL);
@@ -527,8 +528,15 @@ static int navi10_tables_init(struct smu_context *smu)
        if (!smu_table->watermarks_table)
                goto err2_out;
 
+       smu_table->driver_smu_config_table =
+               kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
+       if (!smu_table->driver_smu_config_table)
+               goto err3_out;
+
        return 0;
 
+err3_out:
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
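The new allocation slots into the existing goto ladder: each err label frees what the previous successful step allocated, so a failure at any depth unwinds cleanly. A minimal generic sketch of the same idiom (names illustrative):

/* Sketch of the unwind pattern used in navi10_tables_init() above. */
static int example_alloc_three(void **a, void **b, void **c)
{
	*a = kzalloc(64, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kzalloc(64, GFP_KERNEL);
	if (!*b)
		goto err1;

	*c = kzalloc(64, GFP_KERNEL);
	if (!*c)
		goto err2;

	return 0;

err2:
	kfree(*b);
err1:
	kfree(*a);
	return -ENOMEM;
}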
@@ -546,15 +554,11 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
                (SmuMetrics_legacy_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -624,8 +628,6 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -638,15 +640,11 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
                (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -719,8 +717,6 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -733,15 +729,11 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
                (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -811,8 +803,6 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -825,15 +815,11 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
                (SmuMetrics_NV12_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -906,8 +892,6 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -1261,6 +1245,215 @@ static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_tabl
                *max = od_table->max[setting];
 }
 
+static int navi10_emit_clk_levels(struct smu_context *smu,
+                                 enum smu_clk_type clk_type,
+                                 char *buf,
+                                 int *offset)
+{
+       uint16_t *curve_settings;
+       int ret = 0;
+       uint32_t cur_value = 0, value = 0;
+       uint32_t freq_values[3] = {0};
+       uint32_t i, levels, mark_index = 0, count = 0;
+       struct smu_table_context *table_context = &smu->smu_table;
+       uint32_t gen_speed, lane_width;
+       struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+       struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+       PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+       OverDriveTable_t *od_table =
+               (OverDriveTable_t *)table_context->overdrive_table;
+       struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+       uint32_t min_value, max_value;
+
+       switch (clk_type) {
+       case SMU_GFXCLK:
+       case SMU_SCLK:
+       case SMU_SOCCLK:
+       case SMU_MCLK:
+       case SMU_UCLK:
+       case SMU_FCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
+       case SMU_DCEFCLK:
+               ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
+               if (ret)
+                       return ret;
+
+               ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
+               if (ret)
+                       return ret;
+
+               if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+                       for (i = 0; i < count; i++) {
+                               ret = smu_v11_0_get_dpm_freq_by_index(smu,
+                                                                     clk_type, i, &value);
+                               if (ret)
+                                       return ret;
+
+                               *offset += sysfs_emit_at(buf, *offset,
+                                               "%d: %uMhz %s\n",
+                                               i, value,
+                                               cur_value == value ? "*" : "");
+                       }
+               } else {
+                       ret = smu_v11_0_get_dpm_freq_by_index(smu,
+                                                             clk_type, 0, &freq_values[0]);
+                       if (ret)
+                               return ret;
+                       ret = smu_v11_0_get_dpm_freq_by_index(smu,
+                                                             clk_type,
+                                                             count - 1,
+                                                             &freq_values[2]);
+                       if (ret)
+                               return ret;
+
+                       freq_values[1] = cur_value;
+                       mark_index = cur_value == freq_values[0] ? 0 :
+                                    cur_value == freq_values[2] ? 2 : 1;
+
+                       levels = 3;
+                       if (mark_index != 1) {
+                               levels = 2;
+                               freq_values[1] = freq_values[2];
+                       }
+
+                       for (i = 0; i < levels; i++) {
+                               *offset += sysfs_emit_at(buf, *offset,
+                                               "%d: %uMhz %s\n",
+                                               i, freq_values[i],
+                                               i == mark_index ? "*" : "");
+                       }
+               }
+               break;
+       case SMU_PCIE:
+               gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+               lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       *offset += sysfs_emit_at(buf, *offset, "%d: %s %s %dMhz %s\n", i,
+                                       (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+                                       (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+                                       (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+                                       pptable->LclkFreq[i],
+                                       (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+                                       (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+                                       "*" : "");
+               }
+               break;
+       case SMU_OD_SCLK:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       return -EOPNOTSUPP;
+               if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
+                       break;
+               *offset += sysfs_emit_at(buf, *offset, "OD_SCLK:\n0: %uMhz\n1: %uMhz\n",
+                                         od_table->GfxclkFmin, od_table->GfxclkFmax);
+               break;
+       case SMU_OD_MCLK:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       return -EOPNOTSUPP;
+               if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
+                       break;
+               *offset += sysfs_emit_at(buf, *offset, "OD_MCLK:\n1: %uMHz\n", od_table->UclkFmax);
+               break;
+       case SMU_OD_VDDC_CURVE:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       return -EOPNOTSUPP;
+               if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
+                       break;
+               *offset += sysfs_emit_at(buf, *offset, "OD_VDDC_CURVE:\n");
+               for (i = 0; i < 3; i++) {
+                       switch (i) {
+                       case 0:
+                               curve_settings = &od_table->GfxclkFreq1;
+                               break;
+                       case 1:
+                               curve_settings = &od_table->GfxclkFreq2;
+                               break;
+                       case 2:
+                               curve_settings = &od_table->GfxclkFreq3;
+                               break;
+                       default:
+                               break;
+                       }
+                       *offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n",
+                                                 i, curve_settings[0],
+                                       curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+               }
+               break;
+       case SMU_OD_RANGE:
+               if (!smu->od_enabled || !od_table || !od_settings)
+                       return -EOPNOTSUPP;
+               *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_RANGE");
+
+               if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+                                                   &min_value, NULL);
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+                                                   NULL, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset, "SCLK: %7uMhz %10uMhz\n",
+                                       min_value, max_value);
+               }
+
+               if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+                       navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset, "MCLK: %7uMhz %10uMhz\n",
+                                       min_value, max_value);
+               }
+
+               if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+                       navi10_od_setting_get_range(od_settings,
+                                                   SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset,
+                                                "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+                                                min_value, max_value);
+                       navi10_od_setting_get_range(od_settings,
+                                                   SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset,
+                                                "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+                                                min_value, max_value);
+                       navi10_od_setting_get_range(od_settings,
+                                                   SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset,
+                                                "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+                                                min_value, max_value);
+                       navi10_od_setting_get_range(od_settings,
+                                                   SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset,
+                                                "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+                                                min_value, max_value);
+                       navi10_od_setting_get_range(od_settings,
+                                                   SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset,
+                                                "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+                                                min_value, max_value);
+                       navi10_od_setting_get_range(od_settings,
+                                                   SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+                                                   &min_value, &max_value);
+                       *offset += sysfs_emit_at(buf, *offset,
+                                                "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+                                                min_value, max_value);
+               }
+
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
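Unlike print_clk_levels(), which folds everything into the returned byte count, emit_clk_levels() returns a negative error code and appends through the caller-maintained offset, so several domains can share one buffer. A hedged sketch of a sysfs show path driving it (assumes the hook is present, as wired up in swsmu_pm_funcs above):

/* Sketch: emit two clock domains into one sysfs buffer. */
static ssize_t example_show_clocks(struct smu_context *smu, char *buf)
{
	int offset = 0;
	int ret;

	ret = smu->ppt_funcs->emit_clk_levels(smu, SMU_SCLK, buf, &offset);
	if (ret)
		return ret;

	ret = smu->ppt_funcs->emit_clk_levels(smu, SMU_MCLK, buf, &offset);
	if (ret)
		return ret;

	return offset;	/* total bytes written via sysfs_emit_at() */
}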
 static int navi10_print_clk_levels(struct smu_context *smu,
                        enum smu_clk_type clk_type, char *buf)
 {
@@ -1649,8 +1842,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+           smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+           smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
                                                  smu->display_config->num_display,
                                                  NULL);
@@ -1664,15 +1857,12 @@ static int navi10_display_config_changed(struct smu_context *smu)
 static bool navi10_is_dpm_running(struct smu_context *smu)
 {
        int ret = 0;
-       uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
-       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
        if (ret)
                return false;
 
-       feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
@@ -1888,13 +2078,13 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
        min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
 
-       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                clock_req.clock_type = amd_pp_dcef_clock;
                clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
 
                ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
                if (!ret) {
-                       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+                       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
                                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                                  SMU_MSG_SetMinDeepSleepDcefclk,
                                                                  min_clocks.dcef_clock_in_sr/100,
@@ -1988,7 +2178,6 @@ static int navi10_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -2048,7 +2237,6 @@ static int navi10_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
@@ -2708,20 +2896,14 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
        SmuMetrics_legacy_t metrics;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              true);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       true);
+       if (ret)
                return ret;
-       }
 
        memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));
 
-       mutex_unlock(&smu->metrics_lock);
-
        smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2778,18 +2960,23 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
                           struct i2c_msg *msg, int num_msgs)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
-       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+       struct amdgpu_device *adev = smu_i2c->adev;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = &smu_table->driver_table;
        SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
        int i, j, r, c;
        u16 dir;
 
+       if (!adev->pm.dpm_enabled)
+               return -EBUSY;
+
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
 
-       req->I2CcontrollerPort = 0;
+       req->I2CcontrollerPort = smu_i2c->port;
        req->I2CSpeed = I2C_SPEED_FAST_400K;
        req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
        dir = msg[0].flags & I2C_M_RD;
@@ -2825,9 +3012,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
                        }
                }
        }
-       mutex_lock(&adev->smu.mutex);
-       r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->smu.mutex);
+       mutex_lock(&adev->pm.mutex);
+       r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -2867,28 +3054,60 @@ static const struct i2c_adapter_quirks navi10_i2c_control_quirks = {
        .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
 };
 
-static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int navi10_i2c_control_init(struct smu_context *smu)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-       int res;
+       struct amdgpu_device *adev = smu->adev;
+       int res, i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               smu_i2c->adev = adev;
+               smu_i2c->port = i;
+               mutex_init(&smu_i2c->mutex);
+               control->owner = THIS_MODULE;
+               control->class = I2C_CLASS_HWMON;
+               control->dev.parent = &adev->pdev->dev;
+               control->algo = &navi10_i2c_algo;
+               snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+               control->quirks = &navi10_i2c_control_quirks;
+               i2c_set_adapdata(control, smu_i2c);
+
+               res = i2c_add_adapter(control);
+               if (res) {
+                       DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+                       goto Out_err;
+               }
+       }
 
-       control->owner = THIS_MODULE;
-       control->class = I2C_CLASS_HWMON;
-       control->dev.parent = &adev->pdev->dev;
-       control->algo = &navi10_i2c_algo;
-       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
-       control->quirks = &navi10_i2c_control_quirks;
+       adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+       adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
 
-       res = i2c_add_adapter(control);
-       if (res)
-               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+       return 0;
+Out_err:
+       for ( ; i >= 0; i--) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
 
+               i2c_del_adapter(control);
+       }
        return res;
 }
 
-static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void navi10_i2c_control_fini(struct smu_context *smu)
 {
-       i2c_del_adapter(control);
+       struct amdgpu_device *adev = smu->adev;
+       int i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               i2c_del_adapter(control);
+       }
+       adev->pm.ras_eeprom_i2c_bus = NULL;
+       adev->pm.fru_eeprom_i2c_bus = NULL;
 }
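
navi10_i2c_control_init() now registers one i2c_adapter per SMU port and unwinds on a partial failure. A minimal sketch of the register-all-or-unwind idiom, with register_bus()/unregister_bus() as hypothetical stand-ins for i2c_add_adapter()/i2c_del_adapter(); note the driver loop above also hands the adapter that failed to register back to i2c_del_adapter(), which the I2C core is written to tolerate:

#include <stdio.h>

#define NUM_BUSES 2

/* Hypothetical stand-ins; bus 1 fails so the unwind path runs. */
static int register_bus(int i) { return i == 1 ? -1 : 0; }
static void unregister_bus(int i) { printf("unregistered bus %d\n", i); }

int main(void)
{
        int i, res = 0;

        for (i = 0; i < NUM_BUSES; i++) {
                res = register_bus(i);
                if (res)
                        goto out_err;
                printf("registered bus %d\n", i);
        }
        return 0;

out_err:
        /* Walk back over everything registered before the failure. */
        while (--i >= 0)
                unregister_bus(i);
        return res;
}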
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
@@ -2900,20 +3119,14 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
        SmuMetrics_t metrics;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              true);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       true);
+       if (ret)
                return ret;
-       }
 
        memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
 
-       mutex_unlock(&smu->metrics_lock);
-
        smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2978,20 +3191,14 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
        SmuMetrics_NV12_legacy_t metrics;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              true);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       true);
+       if (ret)
                return ret;
-       }
 
        memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));
 
-       mutex_unlock(&smu->metrics_lock);
-
        smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3059,20 +3266,14 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
        SmuMetrics_NV12_t metrics;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              true);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       true);
+       if (ret)
                return ret;
-       }
 
        memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
 
-       mutex_unlock(&smu->metrics_lock);
-
        smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3237,6 +3438,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .i2c_init = navi10_i2c_control_init,
        .i2c_fini = navi10_i2c_control_fini,
        .print_clk_levels = navi10_print_clk_levels,
+       .emit_clk_levels = navi10_emit_clk_levels,
        .force_clk_levels = navi10_force_clk_levels,
        .populate_umd_state_clk = navi10_populate_umd_state_clk,
        .get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
index 777f717c37aec14423bc6a2067df28d7e204c6f3..358031c8c79ef258b0ca2f371662ea536f3586fd 100644
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/i2c.h>
 #include "amdgpu.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
@@ -46,6 +47,7 @@
 #include "mp/mp_11_0_sh_mask.h"
 
 #include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "amdgpu_ras.h"
 #include "smu_cmn.h"
 
 /*
@@ -58,8 +60,6 @@
 #undef pr_info
 #undef pr_debug
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
 /* STB FIFO depth is in 64bit units */
 #define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8
 
+/*
+ * SMU supports the ECCTABLE since version 58.70.0;
+ * use this to check whether the ECCTABLE feature is supported.
+ */
+#define SUPPORT_ECCTABLE_SMU_VERSION 0x003a4600
+
 static int get_table_size(struct smu_context *smu)
 {
        if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
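
The gate above works because SMU firmware versions pack one byte per field, program.major.minor.debug from the top byte down, as the smu_v11_0_check_fw_version() change later in this diff spells out. A quick check that 0x003a4600 really decodes to 58.70.0:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t v = 0x003a4600;        /* SUPPORT_ECCTABLE_SMU_VERSION */

        printf("program %u, version %u.%u.%u\n",
               (v >> 24) & 0xff,        /* program: 0  */
               (v >> 16) & 0xff,        /* major:  58  */
               (v >>  8) & 0xff,        /* minor:  70  */
               v & 0xff);               /* debug:   0  */
        return 0;
}
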
@@ -225,6 +231,7 @@ static struct cmn2asic_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(OVERDRIVE),
        TAB_MAP(I2C_COMMANDS),
        TAB_MAP(PACE),
+       TAB_MAP(ECCINFO),
 };
 
 static struct cmn2asic_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -466,6 +473,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
        SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
                       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
                       AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfigExternal_t),
+                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
        smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
        if (!smu_table->metrics_table)
@@ -481,8 +492,21 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
        if (!smu_table->watermarks_table)
                goto err2_out;
 
+       smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+       if (!smu_table->ecc_table)
+               goto err3_out;
+
+       smu_table->driver_smu_config_table =
+               kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
+       if (!smu_table->driver_smu_config_table)
+               goto err4_out;
+
        return 0;
 
+err4_out:
+       kfree(smu_table->ecc_table);
+err3_out:
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
@@ -525,15 +549,11 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
        uint16_t average_gfx_activity;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -633,8 +653,6 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 
 }
@@ -1036,10 +1054,6 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
                if (ret)
                        goto print_clk_out;
 
-               /* no need to disable gfxoff when retrieving the current gfxclk */
-               if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
-                       amdgpu_gfx_off_ctrl(adev, false);
-
                ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
                if (ret)
                        goto print_clk_out;
@@ -1168,25 +1182,18 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
        }
 
 print_clk_out:
-       if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
-               amdgpu_gfx_off_ctrl(adev, true);
-
        return size;
 }
 
 static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type, uint32_t mask)
 {
-       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
 
        soft_min_level = mask ? (ffs(mask) - 1) : 0;
        soft_max_level = mask ? (fls(mask) - 1) : 0;
 
-       if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
-               amdgpu_gfx_off_ctrl(adev, false);
-
        switch (clk_type) {
        case SMU_GFXCLK:
        case SMU_SCLK:
@@ -1220,9 +1227,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
        }
 
 forec_level_out:
-       if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
-               amdgpu_gfx_off_ctrl(adev, true);
-
        return 0;
 }
 
@@ -1238,21 +1242,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
                                &dpm_context->dpm_tables.soc_table;
        struct smu_umd_pstate_table *pstate_table =
                                &smu->pstate_table;
+       struct amdgpu_device *adev = smu->adev;
 
        pstate_table->gfxclk_pstate.min = gfx_table->min;
        pstate_table->gfxclk_pstate.peak = gfx_table->max;
-       if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
-               pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
 
        pstate_table->uclk_pstate.min = mem_table->min;
        pstate_table->uclk_pstate.peak = mem_table->max;
-       if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
-               pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
 
        pstate_table->socclk_pstate.min = soc_table->min;
        pstate_table->socclk_pstate.peak = soc_table->max;
-       if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+       switch (adev->asic_type) {
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
+               pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+               pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
                pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+               break;
+       case CHIP_DIMGREY_CAVEFISH:
+               pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+               pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+               pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+               break;
+       case CHIP_BEIGE_GOBY:
+               pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+               pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+               pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+               break;
+       default:
+               break;
+       }
 
        return 0;
 }
@@ -1287,8 +1307,8 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+           smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+           smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 #if 0
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
                                                  smu->display_config->num_display,
@@ -1304,15 +1324,12 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
 static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
 {
        int ret = 0;
-       uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
-       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
        if (ret)
                return false;
 
-       feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
@@ -1527,13 +1544,13 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
        min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
 
-       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                clock_req.clock_type = amd_pp_dcef_clock;
                clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
 
                ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
                if (!ret) {
-                       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+                       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
                                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                                  SMU_MSG_SetMinDeepSleepDcefclk,
                                                                  min_clocks.dcef_clock_in_sr/100,
@@ -1625,7 +1642,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                GET_PPTABLE_MEMBER(FanMaximumRpm, &temp);
@@ -1686,7 +1702,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
@@ -1865,16 +1880,7 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
                                enum smu_clk_type clk_type,
                                uint32_t *min, uint32_t *max)
 {
-       struct amdgpu_device *adev = smu->adev;
-       int ret;
-
-       if (clk_type == SMU_GFXCLK)
-               amdgpu_gfx_off_ctrl(adev, false);
-       ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
-       if (clk_type == SMU_GFXCLK)
-               amdgpu_gfx_off_ctrl(adev, true);
-
-       return ret;
+       return smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
 }
 
 static void sienna_cichlid_dump_od_table(struct smu_context *smu,
@@ -3458,18 +3464,23 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
 static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
                                   struct i2c_msg *msg, int num_msgs)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
-       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+       struct amdgpu_device *adev = smu_i2c->adev;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = &smu_table->driver_table;
        SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
        int i, j, r, c;
        u16 dir;
 
+       if (!adev->pm.dpm_enabled)
+               return -EBUSY;
+
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
 
-       req->I2CcontrollerPort = 1;
+       req->I2CcontrollerPort = smu_i2c->port;
        req->I2CSpeed = I2C_SPEED_FAST_400K;
        req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
        dir = msg[0].flags & I2C_M_RD;
@@ -3505,9 +3516,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
                        }
                }
        }
-       mutex_lock(&adev->smu.mutex);
-       r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->smu.mutex);
+       mutex_lock(&adev->pm.mutex);
+       r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -3547,28 +3558,61 @@ static const struct i2c_adapter_quirks sienna_cichlid_i2c_control_quirks = {
        .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
 };
 
-static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-       int res;
-
-       control->owner = THIS_MODULE;
-       control->class = I2C_CLASS_HWMON;
-       control->dev.parent = &adev->pdev->dev;
-       control->algo = &sienna_cichlid_i2c_algo;
-       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
-       control->quirks = &sienna_cichlid_i2c_control_quirks;
+       struct amdgpu_device *adev = smu->adev;
+       int res, i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               smu_i2c->adev = adev;
+               smu_i2c->port = i;
+               mutex_init(&smu_i2c->mutex);
+               control->owner = THIS_MODULE;
+               control->class = I2C_CLASS_HWMON;
+               control->dev.parent = &adev->pdev->dev;
+               control->algo = &sienna_cichlid_i2c_algo;
+               snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+               control->quirks = &sienna_cichlid_i2c_control_quirks;
+               i2c_set_adapdata(control, smu_i2c);
+
+               res = i2c_add_adapter(control);
+               if (res) {
+                       DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+                       goto Out_err;
+               }
+       }
+       /* assign the buses used for the FRU EEPROM and RAS EEPROM */
+       /* XXX ideally this would be something in a vbios data table */
+       adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
+       adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
 
-       res = i2c_add_adapter(control);
-       if (res)
-               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+       return 0;
+Out_err:
+       for ( ; i >= 0; i--) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
 
+               i2c_del_adapter(control);
+       }
        return res;
 }
 
-static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
 {
-       i2c_del_adapter(control);
+       struct amdgpu_device *adev = smu->adev;
+       int i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               i2c_del_adapter(control);
+       }
+       adev->pm.ras_eeprom_i2c_bus = NULL;
+       adev->pm.fru_eeprom_i2c_bus = NULL;
 }
 
 static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
@@ -3588,14 +3632,11 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
        uint16_t average_gfx_activity;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              &metrics_external,
-                                              true);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       &metrics_external,
+                                       true);
+       if (ret)
                return ret;
-       }
 
        smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
@@ -3685,8 +3726,6 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
                                smu_v11_0_get_current_pcie_link_speed(smu);
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
        *table = (void *)gpu_metrics;
@@ -3694,16 +3733,70 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
        return sizeof(struct gpu_metrics_v1_3);
 }
 
+static int sienna_cichlid_check_ecc_table_support(struct smu_context *smu)
+{
+       uint32_t if_version = 0xff, smu_version = 0xff;
+       int ret = 0;
+
+       ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+       if (ret)
+               return -EOPNOTSUPP;
+
+       if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
+               ret = -EOPNOTSUPP;
+
+       return ret;
+}
+
+static ssize_t sienna_cichlid_get_ecc_info(struct smu_context *smu,
+                                       void *table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       EccInfoTable_t *ecc_table = NULL;
+       struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+       int i, ret = 0;
+       struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+       ret = sienna_cichlid_check_ecc_table_support(smu);
+       if (ret)
+               return ret;
+
+       ret = smu_cmn_update_table(smu,
+                               SMU_TABLE_ECCINFO,
+                               0,
+                               smu_table->ecc_table,
+                               false);
+       if (ret) {
+               dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
+               return ret;
+       }
+
+       ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+       for (i = 0; i < SIENNA_CICHLID_UMC_CHANNEL_NUM; i++) {
+               ecc_info_per_channel = &(eccinfo->ecc[i]);
+               ecc_info_per_channel->ce_count_lo_chip =
+                       ecc_table->EccInfo[i].ce_count_lo_chip;
+               ecc_info_per_channel->ce_count_hi_chip =
+                       ecc_table->EccInfo[i].ce_count_hi_chip;
+               ecc_info_per_channel->mca_umc_status =
+                       ecc_table->EccInfo[i].mca_umc_status;
+               ecc_info_per_channel->mca_umc_addr =
+                       ecc_table->EccInfo[i].mca_umc_addr;
+       }
+
+       return ret;
+}
+
 static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
 {
-       struct smu_table_context *table_context = &smu->smu_table;
-       PPTable_t *smc_pptable = table_context->driver_pptable;
+       uint16_t *mgpu_fan_boost_limit_rpm;
 
+       GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
        /*
         * Skip the MGpuFanBoost setting for those ASICs
         * which do not support it
         */
-       if (!smc_pptable->MGpuFanBoostLimitRpm)
+       if (*mgpu_fan_boost_limit_rpm == 0)
                return 0;
 
        return smu_cmn_send_smc_msg_with_param(smu,
@@ -3719,7 +3812,7 @@ static int sienna_cichlid_gpo_control(struct smu_context *smu,
        int ret = 0;
 
 
-       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
                ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
                if (ret)
                        return ret;
@@ -3832,9 +3925,9 @@ static void sienna_cichlid_stb_init(struct smu_context *smu)
 
 }
 
-int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
-                                      void *buf,
-                                      uint32_t size)
+static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
+                                             void *buf,
+                                             uint32_t size)
 {
        uint32_t *p = buf;
        struct amdgpu_device *adev = smu->adev;
@@ -3945,6 +4038,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .gpo_control = sienna_cichlid_gpo_control,
        .set_mp1_state = sienna_cichlid_set_mp1_state,
        .stb_collect_info = sienna_cichlid_stb_get_data_direct,
+       .get_ecc_info = sienna_cichlid_get_ecc_info,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
index 38cd0ece24f6b16ffa3cc6a06b179c5ca16f6c1a..42f705c7a36f81e641d225eb2f15401546545ce4 100644
@@ -33,6 +33,14 @@ typedef enum {
 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK    960
 #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK    1000
 
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
 extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
 
 #endif
index 4e9e2cf398591f7dba0f4cc4180162ccc5303138..b87f550af26bae9661432e844c669089caf5adbf 100644
@@ -225,15 +225,15 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        uint32_t if_version = 0xff, smu_version = 0xff;
-       uint16_t smu_major;
-       uint8_t smu_minor, smu_debug;
+       uint8_t smu_program, smu_major, smu_minor, smu_debug;
        int ret = 0;
 
        ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;
 
-       smu_major = (smu_version >> 16) & 0xffff;
+       smu_program = (smu_version >> 24) & 0xff;
+       smu_major = (smu_version >> 16) & 0xff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;
        if (smu->is_apu)
@@ -287,9 +287,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
         */
        if (if_version != smu->smc_driver_if_version) {
                dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
-                       "smu fw version = 0x%08x (%d.%d.%d)\n",
+                       "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
                        smu->smc_driver_if_version, if_version,
-                       smu_version, smu_major, smu_minor, smu_debug);
+                       smu_program, smu_version, smu_major, smu_minor, smu_debug);
                dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
        }
 
@@ -473,8 +473,12 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
        kfree(smu_table->hardcode_pptable);
        smu_table->hardcode_pptable = NULL;
 
+       kfree(smu_table->driver_smu_config_table);
+       kfree(smu_table->ecc_table);
        kfree(smu_table->metrics_table);
        kfree(smu_table->watermarks_table);
+       smu_table->driver_smu_config_table = NULL;
+       smu_table->ecc_table = NULL;
        smu_table->metrics_table = NULL;
        smu_table->watermarks_table = NULL;
        smu_table->metrics_time = 0;
@@ -796,30 +800,8 @@ failed:
 int smu_v11_0_system_features_control(struct smu_context *smu,
                                             bool en)
 {
-       struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_mask[2];
-       int ret = 0;
-
-       ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
-                                    SMU_MSG_DisableAllSmuFeatures), NULL);
-       if (ret)
-               return ret;
-
-       bitmap_zero(feature->enabled, feature->feature_num);
-       bitmap_zero(feature->supported, feature->feature_num);
-
-       if (en) {
-               ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
-               if (ret)
-                       return ret;
-
-               bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-                           feature->feature_num);
-               bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-                           feature->feature_num);
-       }
-
-       return ret;
+       return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+                                         SMU_MSG_DisableAllSmuFeatures), NULL);
 }
 
 int smu_v11_0_notify_display_change(struct smu_context *smu)
@@ -1372,7 +1354,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
                                   unsigned type,
                                   enum amdgpu_interrupt_state state)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        uint32_t low, high;
        uint32_t val = 0;
 
@@ -1441,7 +1423,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        uint32_t client_id = entry->client_id;
        uint32_t src_id = entry->src_id;
        /*
@@ -1615,13 +1597,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
 {
        struct smu_baco_context *smu_baco = &smu->smu_baco;
-       enum smu_baco_state baco_state;
-
-       mutex_lock(&smu_baco->mutex);
-       baco_state = smu_baco->state;
-       mutex_unlock(&smu_baco->mutex);
 
-       return baco_state;
+       return smu_baco->state;
 }
 
 #define D3HOT_BACO_SEQUENCE 0
@@ -1638,8 +1615,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
        if (smu_v11_0_baco_get_state(smu) == state)
                return 0;
 
-       mutex_lock(&smu_baco->mutex);
-
        if (state == SMU_BACO_STATE_ENTER) {
                switch (adev->ip_versions[MP1_HWIP][0]) {
                case IP_VERSION(11, 0, 7):
@@ -1680,18 +1655,16 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
        } else {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
                if (ret)
-                       goto out;
+                       return ret;
 
                /* clear vbios scratch 6 and 7 for coming asic reinit */
                WREG32(adev->bios_scratch_reg_offset + 6, 0);
                WREG32(adev->bios_scratch_reg_offset + 7, 0);
        }
-       if (ret)
-               goto out;
 
-       smu_baco->state = state;
-out:
-       mutex_unlock(&smu_baco->mutex);
+       if (!ret)
+               smu_baco->state = state;
+
        return ret;
 }
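
Worth noting: with smu_baco->mutex dropped here, like sensor_lock and metrics_lock elsewhere in this diff, BACO serialization presumably falls to the higher-level adev->pm.mutex that the i2c paths now take, and smu_baco->state is only published once the ENTER/EXIT message has actually succeeded.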
 
@@ -1798,7 +1771,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
                                          uint32_t min,
                                          uint32_t max)
 {
-       struct amdgpu_device *adev = smu->adev;
        int ret = 0, clk_id = 0;
        uint32_t param;
 
@@ -1811,9 +1783,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
        if (clk_id < 0)
                return clk_id;
 
-       if (clk_type == SMU_GFXCLK)
-               amdgpu_gfx_off_ctrl(adev, false);
-
        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
@@ -1831,9 +1800,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
        }
 
 out:
-       if (clk_type == SMU_GFXCLK)
-               amdgpu_gfx_off_ctrl(adev, true);
-
        return ret;
 }
 
index 5cb07ed227fb090cdf9b6fdc2dddae1388ea2469..96a5b31f708dc91a84d8b44674b8b211b1eb9cd5 100644
@@ -273,15 +273,11 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -335,8 +331,6 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -348,15 +342,11 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -410,8 +400,6 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -512,21 +500,17 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
-       uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
        /* we need to re-init after suspend so return false */
        if (adev->in_suspend)
                return false;
 
-       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
 
        if (ret)
                return false;
 
-       feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
-                               ((uint64_t)feature_mask[1] << 32));
-
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
@@ -1506,7 +1490,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
                ret = vangogh_common_get_smu_metrics_data(smu,
@@ -1568,7 +1551,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
@@ -1965,30 +1947,13 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
 static int vangogh_system_features_control(struct smu_context *smu, bool en)
 {
        struct amdgpu_device *adev = smu->adev;
-       struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_mask[2];
        int ret = 0;
 
        if (adev->pm.fw_version >= 0x43f1700 && !en)
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
                                                      RLC_STATUS_OFF, NULL);
 
-       bitmap_zero(feature->enabled, feature->feature_num);
-       bitmap_zero(feature->supported, feature->feature_num);
-
-       if (!en)
-               return ret;
-
-       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
-       if (ret)
-               return ret;
-
-       bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-                   feature->feature_num);
-       bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-                   feature->feature_num);
-
-       return 0;
+       return ret;
 }
 
 static int vangogh_post_smu_init(struct smu_context *smu)
@@ -2003,7 +1968,7 @@ static int vangogh_post_smu_init(struct smu_context *smu)
                adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
 
        /* the allow message will be sent after the enable message on Vangogh */
-       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
                        (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
                if (ret) {
@@ -2196,7 +2161,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
        .dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
        .is_dpm_running = vangogh_is_dpm_running,
        .read_sensor = vangogh_read_sensor,
-       .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
        .set_watermarks_table = vangogh_set_watermarks_table,
        .set_driver_table_location = smu_v11_0_set_driver_table_location,
index 25c4b135f8303b2a95149ce0957b398bd6d76a61..e99e7b2bf25b15e5b7ddb8a95925093e6f08985b 100644
@@ -1128,15 +1128,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_AVERAGE_GFXCLK:
@@ -1201,8 +1197,6 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -1215,7 +1209,6 @@ static int renoir_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
                ret = renoir_get_smu_metrics_data(smu,
@@ -1283,7 +1276,6 @@ static int renoir_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
index 9c91e79c955fb24b8905b8b913253580cd69f4c6..56a02bc60ceeedd10daa4fdf5973439f3f1d1341 100644
@@ -74,15 +74,15 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        uint32_t if_version = 0xff, smu_version = 0xff;
-       uint16_t smu_major;
-       uint8_t smu_minor, smu_debug;
+       uint8_t smu_program, smu_major, smu_minor, smu_debug;
        int ret = 0;
 
        ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;
 
-       smu_major = (smu_version >> 16) & 0xffff;
+       smu_program = (smu_version >> 24) & 0xff;
+       smu_major = (smu_version >> 16) & 0xff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;
        if (smu->is_apu)
@@ -98,9 +98,9 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
         */
        if (if_version != smu->smc_driver_if_version) {
                dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
-                       "smu fw version = 0x%08x (%d.%d.%d)\n",
+                       "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
                        smu->smc_driver_if_version, if_version,
-                       smu_version, smu_major, smu_minor, smu_debug);
+                       smu_program, smu_version, smu_major, smu_minor, smu_debug);
                dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
        }
 
index 4885c4ae78b73dc31edac8da14f5742c049cadeb..890acc4e2cb8f860747f806d5632b958aca69648 100644
@@ -25,6 +25,7 @@
 
 #include <linux/firmware.h>
 #include "amdgpu.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
@@ -33,7 +34,6 @@
 #include "smu13_driver_if_aldebaran.h"
 #include "soc15_common.h"
 #include "atom.h"
-#include "power_state.h"
 #include "aldebaran_ppt.h"
 #include "smu_v13_0_pptable.h"
 #include "aldebaran_ppsmc.h"
@@ -57,8 +57,6 @@
 #undef pr_info
 #undef pr_debug
 
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
 #define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
        [smu_feature] = {1, (aldebaran_feature)}
 
@@ -572,15 +570,11 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              NULL,
-                                              false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu,
+                                       NULL,
+                                       false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_CURR_GFXCLK:
@@ -654,8 +648,6 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -1148,7 +1140,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MEM_LOAD:
        case AMDGPU_PP_SENSOR_GPU_LOAD:
@@ -1187,7 +1178,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
@@ -1460,32 +1450,34 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
 static bool aldebaran_is_dpm_running(struct smu_context *smu)
 {
        int ret;
-       uint32_t feature_mask[2];
-       unsigned long feature_enabled;
+       uint64_t feature_enabled;
 
-       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
        if (ret)
                return false;
-       feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
-                                         ((uint64_t)feature_mask[1] << 32));
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
 static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
                              struct i2c_msg *msg, int num_msgs)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
-       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+       struct amdgpu_device *adev = smu_i2c->adev;
+       struct smu_context *smu = adev->powerplay.pp_handle;
+       struct smu_table_context *smu_table = &smu->smu_table;
        struct smu_table *table = &smu_table->driver_table;
        SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
        int i, j, r, c;
        u16 dir;
 
+       if (!adev->pm.dpm_enabled)
+               return -EBUSY;
+
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;
 
-       req->I2CcontrollerPort = 0;
+       req->I2CcontrollerPort = smu_i2c->port;
        req->I2CSpeed = I2C_SPEED_FAST_400K;
        req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
        dir = msg[0].flags & I2C_M_RD;
@@ -1521,9 +1513,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
                        }
                }
        }
-       mutex_lock(&adev->smu.mutex);
-       r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
-       mutex_unlock(&adev->smu.mutex);
+       mutex_lock(&adev->pm.mutex);
+       r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+       mutex_unlock(&adev->pm.mutex);
        if (r)
                goto fail;
 
@@ -1563,28 +1555,53 @@ static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = {
        .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
 };
 
-static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int aldebaran_i2c_control_init(struct smu_context *smu)
 {
-       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_device *adev = smu->adev;
+       struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
+       struct i2c_adapter *control = &smu_i2c->adapter;
        int res;
 
+       smu_i2c->adev = adev;
+       smu_i2c->port = 0;
+       mutex_init(&smu_i2c->mutex);
        control->owner = THIS_MODULE;
        control->class = I2C_CLASS_SPD;
        control->dev.parent = &adev->pdev->dev;
        control->algo = &aldebaran_i2c_algo;
-       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
        control->quirks = &aldebaran_i2c_control_quirks;
+       i2c_set_adapdata(control, smu_i2c);
 
        res = i2c_add_adapter(control);
-       if (res)
+       if (res) {
                DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+               goto Out_err;
+       }
+
+       adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+       adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+       return 0;
+Out_err:
+       i2c_del_adapter(control);
 
        return res;
 }
 
-static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void aldebaran_i2c_control_fini(struct smu_context *smu)
 {
-       i2c_del_adapter(control);
+       struct amdgpu_device *adev = smu->adev;
+       int i;
+
+       for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+               struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+               struct i2c_adapter *control = &smu_i2c->adapter;
+
+               i2c_del_adapter(control);
+       }
+       adev->pm.ras_eeprom_i2c_bus = NULL;
+       adev->pm.fru_eeprom_i2c_bus = NULL;
 }
 
 static void aldebaran_get_unique_id(struct smu_context *smu)
@@ -1594,17 +1611,14 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
        uint32_t upper32 = 0, lower32 = 0;
        int ret;
 
-       mutex_lock(&smu->metrics_lock);
-       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+       ret = smu_cmn_get_metrics_table(smu, NULL, false);
        if (ret)
-               goto out_unlock;
+               goto out;
 
        upper32 = metrics->PublicSerialNumUpper32;
        lower32 = metrics->PublicSerialNumLower32;
 
-out_unlock:
-       mutex_unlock(&smu->metrics_lock);
-
+out:
        adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
        if (adev->serial[0] == '\0')
                sprintf(adev->serial, "%016llx", adev->unique_id);
index b54790d3483ef16571c05be739a483296a3865d7..f0ab1dc3ca59fe1849f12e96d825de5c51ae2fdd 100644
@@ -198,15 +198,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        uint32_t if_version = 0xff, smu_version = 0xff;
-       uint16_t smu_major;
-       uint8_t smu_minor, smu_debug;
+       uint8_t smu_program, smu_major, smu_minor, smu_debug;
        int ret = 0;
 
        ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
                return ret;
 
-       smu_major = (smu_version >> 16) & 0xffff;
+       smu_program = (smu_version >> 24) & 0xff;
+       smu_major = (smu_version >> 16) & 0xff;
        smu_minor = (smu_version >> 8) & 0xff;
        smu_debug = (smu_version >> 0) & 0xff;
        if (smu->is_apu)
@@ -229,8 +229,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 
        /* only for dGPU w/ SMU13 */
        if (adev->pm.fw)
-               dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
-                        smu_version, smu_major, smu_minor, smu_debug);
+               dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+                        smu_program, smu_version, smu_major, smu_minor, smu_debug);
 
        /*
         * 1. if_version mismatch is not critical as our fw is designed
@@ -242,9 +242,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
         */
        if (if_version != smu->smc_driver_if_version) {
                dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
-                        "smu fw version = 0x%08x (%d.%d.%d)\n",
+                        "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
                         smu->smc_driver_if_version, if_version,
-                        smu_version, smu_major, smu_minor, smu_debug);
+                        smu_program, smu_version, smu_major, smu_minor, smu_debug);
                dev_warn(adev->dev, "SMU driver if version not matched\n");
        }
 
@@ -722,25 +722,21 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
 
-       mutex_lock(&feature->mutex);
-       if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
-               goto failed;
+       if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+           feature->feature_num < 64)
+               return -EINVAL;
 
        bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
 
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                              feature_mask[1], NULL);
        if (ret)
-               goto failed;
-
-       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
-                                             feature_mask[0], NULL);
-       if (ret)
-               goto failed;
+               return ret;
 
-failed:
-       mutex_unlock(&feature->mutex);
-       return ret;
+       return smu_cmn_send_smc_msg_with_param(smu,
+                                              SMU_MSG_SetAllowedFeaturesMaskLow,
+                                              feature_mask[0],
+                                              NULL);
 }
 
 int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
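
The allowed-feature bitmap is still handed to the PMFW as two 32-bit words, high word first. A standalone sketch of the split performed above (the mask value is invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t allowed = 0x00000003f0000001ULL;   /* hypothetical allowed bitmap */
        uint32_t feature_mask[2];

        feature_mask[0] = (uint32_t)(allowed & 0xffffffff); /* ..MaskLow payload */
        feature_mask[1] = (uint32_t)(allowed >> 32);        /* ..MaskHigh payload */

        printf("high 0x%08x, low 0x%08x\n", feature_mask[1], feature_mask[0]);
        return 0;
    }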
@@ -768,30 +764,8 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
 int smu_v13_0_system_features_control(struct smu_context *smu,
                                      bool en)
 {
-       struct smu_feature *feature = &smu->smu_feature;
-       uint32_t feature_mask[2];
-       int ret = 0;
-
-       ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
-                                        SMU_MSG_DisableAllSmuFeatures), NULL);
-       if (ret)
-               return ret;
-
-       bitmap_zero(feature->enabled, feature->feature_num);
-       bitmap_zero(feature->supported, feature->feature_num);
-
-       if (en) {
-               ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
-               if (ret)
-                       return ret;
-
-               bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-                           feature->feature_num);
-               bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-                           feature->feature_num);
-       }
-
-       return ret;
+       return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+                                         SMU_MSG_DisableAllSmuFeatures), NULL);
 }
 
 int smu_v13_0_notify_display_change(struct smu_context *smu)
@@ -1200,7 +1174,7 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
                                   unsigned type,
                                   enum amdgpu_interrupt_state state)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        uint32_t low, high;
        uint32_t val = 0;
 
@@ -1275,7 +1249,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
 {
-       struct smu_context *smu = &adev->smu;
+       struct smu_context *smu = adev->powerplay.pp_handle;
        uint32_t client_id = entry->client_id;
        uint32_t src_id = entry->src_id;
        /*
@@ -1321,11 +1295,11 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                        switch (ctxid) {
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
-                               smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+                               smu_v13_0_ack_ac_dc_interrupt(smu);
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
-                               smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+                               smu_v13_0_ack_ac_dc_interrupt(smu);
                                break;
                        case 0x7:
                                /*
@@ -1533,7 +1507,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
                                          uint32_t min,
                                          uint32_t max)
 {
-       struct amdgpu_device *adev = smu->adev;
        int ret = 0, clk_id = 0;
        uint32_t param;
 
@@ -1546,9 +1519,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
        if (clk_id < 0)
                return clk_id;
 
-       if (clk_type == SMU_GFXCLK)
-               amdgpu_gfx_off_ctrl(adev, false);
-
        if (max > 0) {
                param = (uint32_t)((clk_id << 16) | (max & 0xffff));
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
@@ -1566,9 +1536,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
        }
 
 out:
-       if (clk_type == SMU_GFXCLK)
-               amdgpu_gfx_off_ctrl(adev, true);
-
        return ret;
 }
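
The soft min/max messages above pack the clock ID and the frequency limit into a single 32-bit parameter: clock ID in the upper half, limit in the lower 16 bits. A standalone sketch of that packing (the clock index and frequency are invented; the driver resolves clk_id via smu_cmn_to_asic_specific_index()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t clk_id = 2;       /* hypothetical ASIC clock index */
        uint32_t max_mhz = 1800;   /* hypothetical frequency limit */
        uint32_t param = (clk_id << 16) | (max_mhz & 0xffff);

        printf("param 0x%08x -> clk %u, limit %u\n",
               param, param >> 16, param & 0xffff);
        return 0;
    }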
 
index caf1775d48ef6ab670fb6291fe3dfc87a884be39..e90387a84cbb539efb0aee5b19eff1d7a097393f 100644 (file)
@@ -195,30 +195,13 @@ static int yellow_carp_fini_smc_tables(struct smu_context *smu)
 
 static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
 {
-       struct smu_feature *feature = &smu->smu_feature;
        struct amdgpu_device *adev = smu->adev;
-       uint32_t feature_mask[2];
        int ret = 0;
 
        if (!en && !adev->in_s0ix)
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
 
-       bitmap_zero(feature->enabled, feature->feature_num);
-       bitmap_zero(feature->supported, feature->feature_num);
-
-       if (!en)
-               return ret;
-
-       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
-       if (ret)
-               return ret;
-
-       bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
-                   feature->feature_num);
-       bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
-                   feature->feature_num);
-
-       return 0;
+       return ret;
 }
 
 static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
@@ -255,16 +238,13 @@ static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 static bool yellow_carp_is_dpm_running(struct smu_context *smu)
 {
        int ret = 0;
-       uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
-       ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
 
        if (ret)
                return false;
 
-       feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
@@ -310,13 +290,9 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
        SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-
-       ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
-       if (ret) {
-               mutex_unlock(&smu->metrics_lock);
+       ret = smu_cmn_get_metrics_table(smu, NULL, false);
+       if (ret)
                return ret;
-       }
 
        switch (member) {
        case METRICS_AVERAGE_GFXCLK:
@@ -387,8 +363,6 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
                break;
        }
 
-       mutex_unlock(&smu->metrics_lock);
-
        return ret;
 }
 
@@ -401,7 +375,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
-       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
                ret = yellow_carp_get_smu_metrics_data(smu,
@@ -469,7 +442,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
                ret = -EOPNOTSUPP;
                break;
        }
-       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
@@ -1182,7 +1154,7 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
        .is_dpm_running = yellow_carp_is_dpm_running,
        .set_watermarks_table = yellow_carp_set_watermarks_table,
        .get_gpu_metrics = yellow_carp_get_gpu_metrics,
-       .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
        .set_driver_table_location = smu_v13_0_set_driver_table_location,
        .gfx_off_control = smu_v13_0_gfx_off_control,
index ee1a312fd497f4a7d8f03bffd0030ffde3448eba..ea6c2dab5ecc014ef20906b24dc1dfca2e8ed6d3 100644 (file)
@@ -481,7 +481,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
 {
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
-       int ret = 0;
 
        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
@@ -491,22 +490,27 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
 
        WARN_ON(feature_id > feature->feature_num);
 
-       mutex_lock(&feature->mutex);
-       ret = test_bit(feature_id, feature->supported);
-       mutex_unlock(&feature->mutex);
-
-       return ret;
+       return test_bit(feature_id, feature->supported);
 }
 
 int smu_cmn_feature_is_enabled(struct smu_context *smu,
                               enum smu_feature_mask mask)
 {
-       struct smu_feature *feature = &smu->smu_feature;
        struct amdgpu_device *adev = smu->adev;
+       uint64_t enabled_features;
        int feature_id;
-       int ret = 0;
 
-       if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
+       if (smu_cmn_get_enabled_mask(smu, &enabled_features)) {
+               dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
+               return 0;
+       }
+
+       /*
+        * Renoir and Cyan Skillfish are assumed to have all features
+        * enabled. Since they also have no feature_map available, this
+        * check avoids a pointless feature_map lookup below.
+        */
+       if (enabled_features == ULLONG_MAX)
                return 1;
 
        feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -515,13 +519,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
        if (feature_id < 0)
                return 0;
 
-       WARN_ON(feature_id > feature->feature_num);
-
-       mutex_lock(&feature->mutex);
-       ret = test_bit(feature_id, feature->enabled);
-       mutex_unlock(&feature->mutex);
-
-       return ret;
+       return test_bit(feature_id, (unsigned long *)&enabled_features);
 }
 
 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
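
smu_cmn_feature_is_enabled() now tests the freshly queried 64-bit mask instead of a locked, cached bitmap. The standalone sketch below approximates what that test_bit() call amounts to for a single 64-bit word (the feature ID is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Approximates test_bit() on a single 64-bit word. */
    static int feature_bit_set(uint64_t mask, unsigned int feature_id)
    {
        return (mask >> feature_id) & 1;
    }

    int main(void)
    {
        uint64_t enabled_features = 1ULL << 33;   /* hypothetical enabled mask */

        printf("feature 33 is %s\n",
               feature_bit_set(enabled_features, 33) ? "enabled" : "disabled");
        return 0;
    }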
@@ -552,70 +550,61 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
 }
 
 int smu_cmn_get_enabled_mask(struct smu_context *smu,
-                            uint32_t *feature_mask,
-                            uint32_t num)
+                            uint64_t *feature_mask)
 {
-       uint32_t feature_mask_high = 0, feature_mask_low = 0;
-       struct smu_feature *feature = &smu->smu_feature;
-       int ret = 0;
-
-       if (!feature_mask || num < 2)
-               return -EINVAL;
-
-       if (bitmap_empty(feature->enabled, feature->feature_num)) {
-               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
-               if (ret)
-                       return ret;
-
-               ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
-               if (ret)
-                       return ret;
-
-               feature_mask[0] = feature_mask_low;
-               feature_mask[1] = feature_mask_high;
-       } else {
-               bitmap_copy((unsigned long *)feature_mask, feature->enabled,
-                            feature->feature_num);
-       }
-
-       return ret;
-}
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
-                                       uint32_t *feature_mask,
-                                       uint32_t num)
-{
-       uint32_t feature_mask_en_low = 0;
-       uint32_t feature_mask_en_high = 0;
-       struct smu_feature *feature = &smu->smu_feature;
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t *feature_mask_high;
+       uint32_t *feature_mask_low;
        int ret = 0;
 
-       if (!feature_mask || num < 2)
+       if (!feature_mask)
                return -EINVAL;
 
-       if (bitmap_empty(feature->enabled, feature->feature_num)) {
-               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
-                                                                                &feature_mask_en_low);
+       feature_mask_low = &((uint32_t *)feature_mask)[0];
+       feature_mask_high = &((uint32_t *)feature_mask)[1];
 
+       switch (adev->ip_versions[MP1_HWIP][0]) {
+       /* For Vangogh and Yellow Carp */
+       case IP_VERSION(11, 5, 0):
+       case IP_VERSION(13, 0, 1):
+       case IP_VERSION(13, 0, 3):
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_GetEnabledSmuFeatures,
+                                                     0,
+                                                     feature_mask_low);
                if (ret)
                        return ret;
 
-               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
-                                                                                &feature_mask_en_high);
-
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_GetEnabledSmuFeatures,
+                                                     1,
+                                                     feature_mask_high);
+               break;
+       /*
+        * Cyan Skillfish and Renoir have no PMFW interface for retrieving
+        * the enabled features, so all features are assumed enabled.
+        * TODO: add other APU ASICs which suffer from the same issue here
+        */
+       case IP_VERSION(11, 0, 8):
+       case IP_VERSION(12, 0, 0):
+       case IP_VERSION(12, 0, 1):
+               memset(feature_mask, 0xff, sizeof(*feature_mask));
+               break;
+       /* other dGPU ASICs */
+       default:
+               ret = smu_cmn_send_smc_msg(smu,
+                                          SMU_MSG_GetEnabledSmuFeaturesHigh,
+                                          feature_mask_high);
                if (ret)
                        return ret;
 
-               feature_mask[0] = feature_mask_en_low;
-               feature_mask[1] = feature_mask_en_high;
-
-       } else {
-               bitmap_copy((unsigned long *)feature_mask, feature->enabled,
-                                feature->feature_num);
+               ret = smu_cmn_send_smc_msg(smu,
+                                          SMU_MSG_GetEnabledSmuFeaturesLow,
+                                          feature_mask_low);
+               break;
        }
 
        return ret;
-
 }
 
 uint64_t smu_cmn_get_indep_throttler_status(
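
The reworked smu_cmn_get_enabled_mask() lets the two 32-bit PMFW replies land directly in the caller's 64-bit mask by aliasing its halves, matching the little-endian layout the driver assumes. A standalone sketch of that aliasing (the reply values are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t feature_mask = 0;
        uint32_t *feature_mask_low  = &((uint32_t *)&feature_mask)[0];
        uint32_t *feature_mask_high = &((uint32_t *)&feature_mask)[1];

        *feature_mask_low  = 0xf0000001;   /* stand-in for the ..FeaturesLow reply */
        *feature_mask_high = 0x00000003;   /* stand-in for the ..FeaturesHigh reply */

        /* prints 0x00000003f0000001 on a little-endian machine */
        printf("mask 0x%016llx\n", (unsigned long long)feature_mask);
        return 0;
    }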
@@ -635,7 +624,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                        uint64_t feature_mask,
                                        bool enabled)
 {
-       struct smu_feature *feature = &smu->smu_feature;
        int ret = 0;
 
        if (enabled) {
@@ -649,8 +637,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                                  SMU_MSG_EnableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
-               if (ret)
-                       return ret;
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                  SMU_MSG_DisableSmuFeaturesLow,
@@ -662,19 +648,8 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                                  SMU_MSG_DisableSmuFeaturesHigh,
                                                  upper_32_bits(feature_mask),
                                                  NULL);
-               if (ret)
-                       return ret;
        }
 
-       mutex_lock(&feature->mutex);
-       if (enabled)
-               bitmap_or(feature->enabled, feature->enabled,
-                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
-       else
-               bitmap_andnot(feature->enabled, feature->enabled,
-                               (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
-       mutex_unlock(&feature->mutex);
-
        return ret;
 }
 
@@ -682,7 +657,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable)
 {
-       struct smu_feature *feature = &smu->smu_feature;
        int feature_id;
 
        feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -691,8 +665,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
        if (feature_id < 0)
                return -EINVAL;
 
-       WARN_ON(feature_id > feature->feature_num);
-
        return smu_cmn_feature_update_enable_state(smu,
                                               1ULL << feature_id,
                                               enable);
@@ -715,29 +687,21 @@ static const char *smu_get_feature_name(struct smu_context *smu,
 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf)
 {
-       uint32_t feature_mask[2] = { 0 };
+       uint64_t feature_mask;
        int feature_index = 0;
        uint32_t count = 0;
        int8_t sort_feature[SMU_FEATURE_COUNT];
        size_t size = 0;
        int ret = 0, i;
+       int feature_id;
 
-       if (!smu->is_apu) {
-               ret = smu_cmn_get_enabled_mask(smu,
-                                               feature_mask,
-                                               2);
-               if (ret)
-                       return 0;
-       } else {
-               ret = smu_cmn_get_enabled_32_bits_mask(smu,
-                                       feature_mask,
-                                       2);
-               if (ret)
-                       return 0;
-       }
+       ret = smu_cmn_get_enabled_mask(smu,
+                                      &feature_mask);
+       if (ret)
+               return 0;
 
        size =  sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
-                       feature_mask[1], feature_mask[0]);
+                       upper_32_bits(feature_mask), lower_32_bits(feature_mask));
 
        memset(sort_feature, -1, sizeof(sort_feature));
 
@@ -758,11 +722,18 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                if (sort_feature[i] < 0)
                        continue;
 
+               /* convert to the ASIC-specific feature ID */
+               feature_id = smu_cmn_to_asic_specific_index(smu,
+                                                           CMN2ASIC_MAPPING_FEATURE,
+                                                           sort_feature[i]);
+               if (feature_id < 0)
+                       continue;
+
                size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
                                count++,
                                smu_get_feature_name(smu, sort_feature[i]),
                                i,
-                               !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+                               !!test_bit(feature_id, (unsigned long *)&feature_mask) ?
                                "enabled" : "disabled");
        }
 
@@ -773,22 +744,17 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask)
 {
        int ret = 0;
-       uint32_t feature_mask[2] = { 0 };
+       uint64_t feature_mask;
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;
-       uint64_t feature_enables = 0;
 
        ret = smu_cmn_get_enabled_mask(smu,
-                                      feature_mask,
-                                      2);
+                                      &feature_mask);
        if (ret)
                return ret;
 
-       feature_enables = ((uint64_t)feature_mask[1] << 32 |
-                          (uint64_t)feature_mask[0]);
-
-       feature_2_enabled  = ~feature_enables & new_mask;
-       feature_2_disabled = feature_enables & ~new_mask;
+       feature_2_enabled  = ~feature_mask & new_mask;
+       feature_2_disabled = feature_mask & ~new_mask;
 
        if (feature_2_enabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
@@ -814,9 +780,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
  *                                               @mask
  *
  * @smu:               smu_context pointer
- * @no_hw_disablement: whether real dpm disablement should be performed
- *                     true: update the cache(about dpm enablement state) only
- *                     false: real dpm disablement plus cache update
  * @mask:              the dpm feature which should not be disabled
  *                     SMU_FEATURE_COUNT: no exception, all dpm features
  *                     to disable
@@ -825,10 +788,8 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
  * 0 on success or a negative error code on failure.
  */
 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
-                                               bool no_hw_disablement,
                                                enum smu_feature_mask mask)
 {
-       struct smu_feature *feature = &smu->smu_feature;
        uint64_t features_to_disable = U64_MAX;
        int skipped_feature_id;
 
@@ -842,18 +803,9 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
                features_to_disable &= ~(1ULL << skipped_feature_id);
        }
 
-       if (no_hw_disablement) {
-               mutex_lock(&feature->mutex);
-               bitmap_andnot(feature->enabled, feature->enabled,
-                               (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
-               mutex_unlock(&feature->mutex);
-
-               return 0;
-       } else {
-               return smu_cmn_feature_update_enable_state(smu,
-                                                          features_to_disable,
-                                                          0);
-       }
+       return smu_cmn_feature_update_enable_state(smu,
+                                                  features_to_disable,
+                                                  0);
 }
 
 int smu_cmn_get_smc_version(struct smu_context *smu,
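
With the cache-only path gone, disable-all-with-exception reduces to pure mask arithmetic: start from all ones and clear the bit of the feature to keep. A standalone sketch (the skipped feature index is invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int skipped_feature_id = 7;                  /* hypothetical ASIC index */
        uint64_t features_to_disable = UINT64_MAX;   /* U64_MAX in kernel terms */

        features_to_disable &= ~(1ULL << skipped_feature_id);

        /* prints 0xffffffffffffff7f */
        printf("disable mask 0x%016llx\n",
               (unsigned long long)features_to_disable);
        return 0;
    }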
@@ -964,9 +916,9 @@ int smu_cmn_write_pptable(struct smu_context *smu)
                                    true);
 }
 
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
-                                    void *metrics_table,
-                                    bool bypass_cache)
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+                             void *metrics_table,
+                             bool bypass_cache)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t table_size =
@@ -994,21 +946,6 @@ int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
        return 0;
 }
 
-int smu_cmn_get_metrics_table(struct smu_context *smu,
-                             void *metrics_table,
-                             bool bypass_cache)
-{
-       int ret = 0;
-
-       mutex_lock(&smu->metrics_lock);
-       ret = smu_cmn_get_metrics_table_locked(smu,
-                                              metrics_table,
-                                              bypass_cache);
-       mutex_unlock(&smu->metrics_lock);
-
-       return ret;
-}
-
 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
 {
        struct metrics_table_header *header = (struct metrics_table_header *)table;
index beea03810bcaabc476e682446c5fc35973ed9dba..a4c593ed8b0341f45647a34271379a66dd3cf98a 100644 (file)
 #include "amdgpu_smu.h"
 
 #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+
+#define FDO_PWM_MODE_STATIC  1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
 int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
                                     uint16_t msg_index,
                                     uint32_t param);
@@ -54,12 +58,7 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type);
 
 int smu_cmn_get_enabled_mask(struct smu_context *smu,
-                            uint32_t *feature_mask,
-                            uint32_t num);
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
-                                       uint32_t *feature_mask,
-                                       uint32_t num);
+                            uint64_t *feature_mask);
 
 uint64_t smu_cmn_get_indep_throttler_status(
                                        const unsigned long dep_status,
@@ -80,7 +79,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask);
 
 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
-                                               bool no_hw_disablement,
                                                enum smu_feature_mask mask);
 
 int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -97,10 +95,6 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu);
 
 int smu_cmn_write_pptable(struct smu_context *smu);
 
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
-                                    void *metrics_table,
-                                    bool bypass_cache);
-
 int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache);
index 59f9cfff3d61515d118e4dc2f8fb66d594721ec0..15bcf72b8e566784d7dd642478eea9eb59f2e03c 100644 (file)
@@ -55,9 +55,9 @@
 #define smu_send_smc_msg(smu, msg, read_arg)                           smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
 #define smu_init_display_count(smu, count)                             smu_ppt_funcs(init_display_count, 0, smu, count)
 #define smu_feature_set_allowed_mask(smu)                              smu_ppt_funcs(set_allowed_mask, 0, smu)
-#define smu_feature_get_enabled_mask(smu, mask, num)                   smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
+#define smu_feature_get_enabled_mask(smu, mask)                                smu_ppt_funcs(get_enabled_mask, -EOPNOTSUPP, smu, mask)
 #define smu_feature_is_enabled(smu, mask)                              smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
-#define smu_disable_all_features_with_exception(smu, no_hw_disablement, mask)          smu_ppt_funcs(disable_all_features_with_exception, 0, smu, no_hw_disablement, mask)
+#define smu_disable_all_features_with_exception(smu, mask)             smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
 #define smu_is_dpm_running(smu)                                                smu_ppt_funcs(is_dpm_running, 0, smu)
 #define smu_notify_display_change(smu)                                 smu_ppt_funcs(notify_display_change, 0, smu)
 #define smu_populate_umd_state_clk(smu)                                        smu_ppt_funcs(populate_umd_state_clk, 0, smu)
@@ -78,8 +78,8 @@
 #define smu_dump_pptable(smu)                                          smu_ppt_funcs(dump_pptable, 0, smu)
 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap)  smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
 #define smu_set_power_source(smu, power_src)                           smu_ppt_funcs(set_power_source, 0, smu, power_src)
-#define smu_i2c_init(smu, control)                                     smu_ppt_funcs(i2c_init, 0, smu, control)
-#define smu_i2c_fini(smu, control)                                     smu_ppt_funcs(i2c_fini, 0, smu, control)
+#define smu_i2c_init(smu)                                               smu_ppt_funcs(i2c_init, 0, smu)
+#define smu_i2c_fini(smu)                                               smu_ppt_funcs(i2c_fini, 0, smu)
 #define smu_get_unique_id(smu)                                         smu_ppt_funcs(get_unique_id, 0, smu)
 #define smu_log_thermal_throttling(smu)                                        smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
 #define smu_get_asic_power_limits(smu, current, default, max)          smu_ppt_funcs(get_power_limit, 0, smu, current, default, max)
index 81b4de7be9f2b2710fd437ff805330aa8b366589..5819737c21c678d3926ef8f9570fa05577123689 100644 (file)
@@ -8517,8 +8517,8 @@ int cik_suspend(struct radeon_device *rdev)
        cik_cp_enable(rdev, false);
        cik_sdma_enable(rdev, false);
        if (rdev->has_uvd) {
-               uvd_v1_0_fini(rdev);
                radeon_uvd_suspend(rdev);
+               uvd_v1_0_fini(rdev);
        }
        if (rdev->has_vce)
                radeon_vce_suspend(rdev);
index eeb590d2dec2e7a13644c9f28451f0dbfaea57c4..455f8036aa54acea5c36a57f25bb386206f8d291 100644 (file)
@@ -5156,8 +5156,8 @@ int evergreen_suspend(struct radeon_device *rdev)
        radeon_pm_suspend(rdev);
        radeon_audio_fini(rdev);
        if (rdev->has_uvd) {
-               uvd_v1_0_fini(rdev);
                radeon_uvd_suspend(rdev);
+               uvd_v1_0_fini(rdev);
        }
        r700_cp_stop(rdev);
        r600_dma_stop(rdev);
index 4a364ca7a1be71ae07beaa7de875e511a64272c8..927e5f42e97d018240b5204c9d0dd7339391292a 100644 (file)
@@ -2323,8 +2323,8 @@ int cayman_suspend(struct radeon_device *rdev)
        cayman_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
        if (rdev->has_uvd) {
-               uvd_v1_0_fini(rdev);
                radeon_uvd_suspend(rdev);
+               uvd_v1_0_fini(rdev);
        }
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
index ca3fcae2adb537539042b3e906659f8bd973a1e9..dd78fc4994024815e0758ad73aef807693a42496 100644 (file)
@@ -3232,8 +3232,8 @@ int r600_suspend(struct radeon_device *rdev)
        radeon_audio_fini(rdev);
        r600_cp_stop(rdev);
        if (rdev->has_uvd) {
-               uvd_v1_0_fini(rdev);
                radeon_uvd_suspend(rdev);
+               uvd_v1_0_fini(rdev);
        }
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
index 4f0fbf66743160e643244a568de399e0ae95a4a7..15692cb241fc01d6b2c92de8ae97dc9eeecd3c83 100644 (file)
@@ -1085,19 +1085,6 @@ static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
-/**
- * radeon_check_pot_argument - check that argument is a power of two
- *
- * @arg: value to check
- *
- * Validates that a certain argument is a power of two (all asics).
- * Returns true if argument is valid.
- */
-static bool radeon_check_pot_argument(int arg)
-{
-       return (arg & (arg - 1)) == 0;
-}
-
 /**
  * radeon_gart_size_auto - Determine a sensible default GART size
  *                         according to ASIC family.
@@ -1126,7 +1113,7 @@ static int radeon_gart_size_auto(enum radeon_family family)
 static void radeon_check_arguments(struct radeon_device *rdev)
 {
        /* vramlimit must be a power of two */
-       if (!radeon_check_pot_argument(radeon_vram_limit)) {
+       if (!is_power_of_2(radeon_vram_limit)) {
                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                                radeon_vram_limit);
                radeon_vram_limit = 0;
@@ -1140,7 +1127,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
                dev_warn(rdev->dev, "gart size (%d) too small\n",
                                radeon_gart_size);
                radeon_gart_size = radeon_gart_size_auto(rdev->family);
-       } else if (!radeon_check_pot_argument(radeon_gart_size)) {
+       } else if (!is_power_of_2(radeon_gart_size)) {
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                                radeon_gart_size);
                radeon_gart_size = radeon_gart_size_auto(rdev->family);
@@ -1163,7 +1150,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
                break;
        }
 
-       if (!radeon_check_pot_argument(radeon_vm_size)) {
+       if (!is_power_of_2(radeon_vm_size)) {
                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
                         radeon_vm_size);
                radeon_vm_size = 4;
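
One subtlety of this swap: the removed open-coded test accepts 0 as a power of two, while the kernel's is_power_of_2() (include/linux/log2.h) additionally requires a non-zero value. A standalone comparison:

    #include <stdbool.h>
    #include <stdio.h>

    /* The removed helper: (arg & (arg - 1)) == 0, which is true for 0. */
    static bool old_check(int arg)
    {
        return (arg & (arg - 1)) == 0;
    }

    /* Mirrors the kernel's is_power_of_2(): also rejects 0. */
    static bool new_check(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
        int samples[] = { 0, 1, 3, 256 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%3d: old %d, new %d\n", samples[i],
                   old_check(samples[i]), new_check((unsigned long)samples[i]));
        return 0;
    }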
index 56ede9d63b12c5098989d5706db7cd1ae048348f..87536d205593828cdc322318ade7d9192e627f18 100644 (file)
@@ -567,7 +567,6 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
                return 0;
 
        if (bo->surface_reg >= 0) {
-               reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }
index 377f9cdb5b53fff3aa09985b007f99d214776859..0558d928d98db28f7adca3c8d314e1a934b09b9c 100644 (file)
@@ -497,6 +497,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
        handle = msg[2];
 
        if (handle == 0) {
+               radeon_bo_kunmap(bo);
                DRM_ERROR("Invalid UVD handle!\n");
                return -EINVAL;
        }
@@ -559,12 +560,10 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return 0;
 
        default:
-
                DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-               return -EINVAL;
        }
 
-       BUG();
+       radeon_bo_kunmap(bo);
        return -EINVAL;
 }
 
index e592e57be1bb48da897378cf9359de7a3fd83901..38796af4fadd4d95478dc81ee5512b686aeade19 100644 (file)
@@ -1894,8 +1894,8 @@ int rv770_suspend(struct radeon_device *rdev)
        radeon_pm_suspend(rdev);
        radeon_audio_fini(rdev);
        if (rdev->has_uvd) {
-               uvd_v1_0_fini(rdev);
                radeon_uvd_suspend(rdev);
+               uvd_v1_0_fini(rdev);
        }
        r700_cp_stop(rdev);
        r600_dma_stop(rdev);
index 013e44ed0f39a6f34ea4367f7a5640a0734adf96..8d5e4b25609d5f54571818ba064080bd6bcb9107 100644 (file)
@@ -6800,8 +6800,8 @@ int si_suspend(struct radeon_device *rdev)
        si_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
        if (rdev->has_uvd) {
-               uvd_v1_0_fini(rdev);
                radeon_uvd_suspend(rdev);
+               uvd_v1_0_fini(rdev);
        }
        if (rdev->has_vce)
                radeon_vce_suspend(rdev);
index 0b94ec7b73e78883ad4ef54ccf6842bd19e4c58a..76b580d10a52877b3fab0c554f0273ecfd4aafab 100644 (file)
@@ -206,6 +206,8 @@ union drm_amdgpu_bo_list {
 #define AMDGPU_CTX_OP_FREE_CTX 2
 #define AMDGPU_CTX_OP_QUERY_STATE      3
 #define AMDGPU_CTX_OP_QUERY_STATE2     4
+#define AMDGPU_CTX_OP_GET_STABLE_PSTATE        5
+#define AMDGPU_CTX_OP_SET_STABLE_PSTATE        6
 
 /* GPU reset status */
 #define AMDGPU_CTX_NO_RESET            0
@@ -238,10 +240,18 @@ union drm_amdgpu_bo_list {
 #define AMDGPU_CTX_PRIORITY_HIGH        512
 #define AMDGPU_CTX_PRIORITY_VERY_HIGH   1023
 
+/* select a stable profiling pstate for perfmon tools */
+#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK  0xf
+#define AMDGPU_CTX_STABLE_PSTATE_NONE  0
+#define AMDGPU_CTX_STABLE_PSTATE_STANDARD  1
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK  2
+#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK  3
+#define AMDGPU_CTX_STABLE_PSTATE_PEAK  4
+
 struct drm_amdgpu_ctx_in {
        /** AMDGPU_CTX_OP_* */
        __u32   op;
-       /** For future use, no flags defined so far */
+       /** Flags */
        __u32   flags;
        __u32   ctx_id;
        /** AMDGPU_CTX_PRIORITY_* */
@@ -262,6 +272,11 @@ union drm_amdgpu_ctx_out {
                        /** Reset status since the last call of the ioctl. */
                        __u32   reset_status;
                } state;
+
+               struct {
+                       __u32   flags;
+                       __u32   _pad;
+               } pstate;
 };
 
 union drm_amdgpu_ctx {
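
A plausible userspace sketch of the new op pair: requesting a stable pstate on an existing context through DRM_IOCTL_AMDGPU_CTX. Context allocation, error handling, and the exact header path are assumptions here; fd is an open render node and ctx_id comes from a prior AMDGPU_CTX_OP_ALLOC_CTX call.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <libdrm/amdgpu_drm.h>   /* install path may vary */

    static int set_peak_pstate(int fd, uint32_t ctx_id)
    {
        union drm_amdgpu_ctx args;

        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
        args.in.ctx_id = ctx_id;
        args.in.flags = AMDGPU_CTX_STABLE_PSTATE_PEAK;

        return ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
    }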
@@ -728,6 +743,8 @@ struct drm_amdgpu_cs_chunk_data {
        #define AMDGPU_INFO_FW_DMCUB            0x14
        /* Subquery id: Query TOC firmware version */
        #define AMDGPU_INFO_FW_TOC              0x15
+       /* Subquery id: Query CAP firmware version */
+       #define AMDGPU_INFO_FW_CAP              0x16
 
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED            0x0f
index af96af174dc47b2510a962cbe4e784c509db7163..6e4268f5e48206d1d6610b58ed0c2093bad301ee 100644 (file)
  * - 1.4 - Indicate new SRAM EDC bit in device properties
  * - 1.5 - Add SVM API
  * - 1.6 - Query clear flags in SVM get_attr API
+ * - 1.7 - Checkpoint Restore (CRIU) API
  */
 #define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 6
+#define KFD_IOCTL_MINOR_VERSION 7
 
 struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
@@ -468,6 +469,82 @@ struct kfd_ioctl_smi_events_args {
        __u32 anon_fd;  /* from KFD */
 };
 
+/**************************************************************************************************
+ * CRIU IOCTLs (Checkpoint Restore In Userspace)
+ *
+ * When checkpointing a process, the userspace application will perform:
+ * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
+ *    all the queues.
+ * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
+ * 3. UNPAUSE op to un-evict all the queues
+ *
+ * When restoring a process, the CRIU userspace application will perform:
+ *
+ * 1. RESTORE op to restore process contents
+ * 2. RESUME op to start the process
+ *
+ * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. The user
+ * application must perform an UNPAUSE operation after calling PROCESS_INFO.
+ */
+
+enum kfd_criu_op {
+       KFD_CRIU_OP_PROCESS_INFO,
+       KFD_CRIU_OP_CHECKPOINT,
+       KFD_CRIU_OP_UNPAUSE,
+       KFD_CRIU_OP_RESTORE,
+       KFD_CRIU_OP_RESUME,
+};
+
+/**
+ * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
+ * @devices:           [in/out] User pointer to memory location for devices information.
+ *                     This is an array of type kfd_criu_device_bucket.
+ * @bos:               [in/out] User pointer to memory location for BOs information
+ *                     This is an array of type kfd_criu_bo_bucket.
+ * @priv_data:         [in/out] User pointer to memory location for private data
+ * @priv_data_size:    [in/out] Size of priv_data in bytes
+ * @num_devices:       [in/out] Number of GPUs used by process. Size of @devices array.
+ * @num_bos:           [in/out] Number of BOs used by process. Size of @bos array.
+ * @num_objects:       [in/out] Number of objects used by process. Objects are opaque to
+ *                              user application.
+ * @pid:               [in/out] PID of the process being checkpointed
+ * @op:                [in] Type of operation (kfd_criu_op)
+ *
+ * Return: 0 on success, -errno on failure
+ */
+struct kfd_ioctl_criu_args {
+       __u64 devices;          /* Used during ops: CHECKPOINT, RESTORE */
+       __u64 bos;              /* Used during ops: CHECKPOINT, RESTORE */
+       __u64 priv_data;        /* Used during ops: CHECKPOINT, RESTORE */
+       __u64 priv_data_size;   /* Used during ops: PROCESS_INFO, RESTORE */
+       __u32 num_devices;      /* Used during ops: PROCESS_INFO, RESTORE */
+       __u32 num_bos;          /* Used during ops: PROCESS_INFO, RESTORE */
+       __u32 num_objects;      /* Used during ops: PROCESS_INFO, RESTORE */
+       __u32 pid;              /* Used during ops: PROCESS_INFO, RESUME */
+       __u32 op;
+};
+
+struct kfd_criu_device_bucket {
+       __u32 user_gpu_id;
+       __u32 actual_gpu_id;
+       __u32 drm_fd;
+       __u32 pad;
+};
+
+struct kfd_criu_bo_bucket {
+       __u64 addr;
+       __u64 size;
+       __u64 offset;
+       __u64 restored_offset;    /* During restore, updated offset for BO */
+       __u32 gpu_id;             /* This is the user_gpu_id */
+       __u32 alloc_flags;
+       __u32 dmabuf_fd;
+       __u32 pad;
+};
+
+/* CRIU IOCTLs - END */
+/**************************************************************************************************/
+
 /* Register offset inside the remapped mmio page
  */
 enum kfd_mmio_remap {
@@ -679,16 +756,16 @@ struct kfd_ioctl_set_xnack_mode_args {
 #define AMDKFD_IOC_WAIT_EVENTS                 \
                AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
 
-#define AMDKFD_IOC_DBG_REGISTER                        \
+#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED     \
                AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
 
-#define AMDKFD_IOC_DBG_UNREGISTER              \
+#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED   \
                AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
 
-#define AMDKFD_IOC_DBG_ADDRESS_WATCH           \
+#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED        \
                AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
 
-#define AMDKFD_IOC_DBG_WAVE_CONTROL            \
+#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \
                AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
 
 #define AMDKFD_IOC_SET_SCRATCH_BACKING_VA      \
@@ -742,7 +819,10 @@ struct kfd_ioctl_set_xnack_mode_args {
 #define AMDKFD_IOC_SET_XNACK_MODE              \
                AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
 
+#define AMDKFD_IOC_CRIU_OP                     \
+               AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
+
 #define AMDKFD_COMMAND_START           0x01
-#define AMDKFD_COMMAND_END             0x22
+#define AMDKFD_COMMAND_END             0x23
 
 #endif