Merge tag 'amd-drm-next-5.13-2021-04-12' of https://gitlab.freedesktop.org/agd5f...
author		Daniel Vetter <daniel.vetter@ffwll.ch>
		Tue, 13 Apr 2021 10:25:16 +0000 (12:25 +0200)
committer	Daniel Vetter <daniel.vetter@ffwll.ch>
		Tue, 13 Apr 2021 10:25:17 +0000 (12:25 +0200)
amd-drm-next-5.13-2021-04-12:

amdgpu:
- Re-enable GPU reset on VanGogh
- Enable DPM flags for SMART_SUSPEND and MAY_SKIP_RESUME
- Disentangle HG from vga_switcheroo
- S0ix fixes
- W=1 fixes
- Resource iterator fixes
- DMCUB updates
- UBSAN fixes
- More PM API cleanup
- Aldebaran updates
- Modifier fixes
- Enable VCN load balancing with asymmetric engines
- Rework BO structs
- Aldebaran reset support
- Initial LTTPR display work
- Display MALL fixes
- Fall back to YCbCr420 when YCbCr444 fails
- SR-IOV fixes
- RAS updates
- Misc cleanups and fixes

radeon:
- Typo fixes
- Fix error handling for firmware on r6xx
- Fix a missing check in DP MST handling

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210412220732.3845-1-alexander.deucher@amd.com
251 files changed:
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/aldebaran.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/aldebaran.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/umc_v6_7.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/logger_types.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/scheduler/sched_main.c
include/drm/gpu_scheduler.h

index 741b68874e5388b9fe3a88af127487d9d7432270..ee85e8aba6360e546e467af43c822c3e5044f0c0 100644 (file)
@@ -71,7 +71,7 @@ amdgpu-y += \
        vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
        vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
        arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-       nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o
+       nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
 
 # add DF block
 amdgpu-y += \
@@ -88,7 +88,7 @@ amdgpu-y += \
 
 # add UMC block
 amdgpu-y += \
-       umc_v6_1.o umc_v6_0.o umc_v8_7.o
+       umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o
 
 # add IH block
 amdgpu-y += \
@@ -179,9 +179,14 @@ amdgpu-y += \
        smuio_v11_0_6.o \
        smuio_v13_0.o
 
+# add reset block
+amdgpu-y += \
+       amdgpu_reset.o
+
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
 
+
 ifneq ($(CONFIG_HSA_AMD),)
 AMDKFD_PATH := ../amdkfd
 include $(FULL_AMD_PATH)/amdkfd/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
new file mode 100644 (file)
index 0000000..65b1dca
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "aldebaran.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+                           struct amdgpu_reset_context *reset_context)
+{
+       struct amdgpu_reset_handler *handler;
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       if (reset_context->method != AMD_RESET_METHOD_NONE) {
+               dev_dbg(adev->dev, "Getting reset handler for method %d\n",
+                       reset_context->method);
+               list_for_each_entry(handler, &reset_ctl->reset_handlers,
+                                    handler_list) {
+                       if (handler->reset_method == reset_context->method)
+                               return handler;
+               }
+       }
+
+       if (adev->gmc.xgmi.connected_to_cpu) {
+               list_for_each_entry(handler, &reset_ctl->reset_handlers,
+                                    handler_list) {
+                       if (handler->reset_method == AMD_RESET_METHOD_MODE2) {
+                               reset_context->method = AMD_RESET_METHOD_MODE2;
+                               return handler;
+                       }
+               }
+       }
+
+       dev_dbg(adev->dev, "Reset handler not found!\n");
+
+       return NULL;
+}
+
+static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+       int r, i;
+
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (!(adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_GFX ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
+
+               r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+               if (r) {
+                       dev_err(adev->dev,
+                               "suspend of IP block <%s> failed %d\n",
+                               adev->ip_blocks[i].version->funcs->name, r);
+                       return r;
+               }
+
+               adev->ip_blocks[i].status.hw = false;
+       }
+
+       return r;
+}
+
+static int
+aldebaran_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+                                 struct amdgpu_reset_context *reset_context)
+{
+       int r = 0;
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       dev_dbg(adev->dev, "Aldebaran prepare hw context\n");
+       /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
+       if (!amdgpu_sriov_vf(adev))
+               r = aldebaran_mode2_suspend_ip(adev);
+
+       return r;
+}
+
+static void aldebaran_async_reset(struct work_struct *work)
+{
+       struct amdgpu_reset_handler *handler;
+       struct amdgpu_reset_control *reset_ctl =
+               container_of(work, struct amdgpu_reset_control, reset_work);
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       list_for_each_entry(handler, &reset_ctl->reset_handlers,
+                            handler_list) {
+               if (handler->reset_method == reset_ctl->active_reset) {
+                       dev_dbg(adev->dev, "Resetting device\n");
+                       handler->do_reset(adev);
+                       break;
+               }
+       }
+}
+
+static int aldebaran_mode2_reset(struct amdgpu_device *adev)
+{
+       /* disable BM */
+       pci_clear_master(adev->pdev);
+       adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
+       return adev->asic_reset_res;
+}
+
+static int
+aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+                             struct amdgpu_reset_context *reset_context)
+{
+       struct amdgpu_device *tmp_adev = NULL;
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+       int r = 0;
+
+       dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+       if (reset_context->hive == NULL) {
+               /* Wrong context, return error */
+               return -EINVAL;
+       }
+
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+               tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
+       }
+       /*
+        * Mode2 reset doesn't need any sync between nodes in an XGMI hive; instead,
+        * launch the resets together so that they can complete asynchronously on
+        * multiple nodes.
+        */
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               /* For XGMI run all resets in parallel to speed up the process */
+               if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+                       if (!queue_work(system_unbound_wq,
+                                       &tmp_adev->reset_cntl->reset_work))
+                               r = -EALREADY;
+               } else
+                       r = aldebaran_mode2_reset(tmp_adev);
+               if (r) {
+                       dev_err(tmp_adev->dev,
+                               "ASIC reset failed with error, %d for drm dev, %s",
+                               r, adev_to_drm(tmp_adev)->unique);
+                       break;
+               }
+       }
+
+       /* For XGMI wait for all resets to complete before proceeding */
+       if (!r) {
+               list_for_each_entry(tmp_adev,
+                                    &reset_context->hive->device_list,
+                                    gmc.xgmi.head) {
+                       if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+                               flush_work(&tmp_adev->reset_cntl->reset_work);
+                               r = tmp_adev->asic_reset_res;
+                               if (r)
+                                       break;
+                       }
+               }
+       }
+
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+               tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+       }
+
+       return r;
+}
+
+static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
+{
+       struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+       struct amdgpu_firmware_info *ucode;
+       struct amdgpu_ip_block *cmn_block;
+       int ucode_count = 0;
+       int i, r;
+
+       dev_dbg(adev->dev, "Reloading ucodes after reset\n");
+       for (i = 0; i < adev->firmware.max_ucodes; i++) {
+               ucode = &adev->firmware.ucode[i];
+               if (!ucode->fw)
+                       continue;
+               switch (ucode->ucode_id) {
+               case AMDGPU_UCODE_ID_SDMA0:
+               case AMDGPU_UCODE_ID_SDMA1:
+               case AMDGPU_UCODE_ID_SDMA2:
+               case AMDGPU_UCODE_ID_SDMA3:
+               case AMDGPU_UCODE_ID_SDMA4:
+               case AMDGPU_UCODE_ID_SDMA5:
+               case AMDGPU_UCODE_ID_SDMA6:
+               case AMDGPU_UCODE_ID_SDMA7:
+               case AMDGPU_UCODE_ID_CP_MEC1:
+               case AMDGPU_UCODE_ID_CP_MEC1_JT:
+               case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+               case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+               case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+               case AMDGPU_UCODE_ID_RLC_G:
+                       ucode_list[ucode_count++] = ucode;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /* Reinit NBIF block */
+       cmn_block =
+               amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_COMMON);
+       if (unlikely(!cmn_block)) {
+               dev_err(adev->dev, "Failed to get BIF handle\n");
+               return -EINVAL;
+       }
+       r = cmn_block->version->funcs->resume(adev);
+       if (r)
+               return r;
+
+       /* Reinit GFXHUB */
+       adev->gfxhub.funcs->init(adev);
+       r = adev->gfxhub.funcs->gart_enable(adev);
+       if (r) {
+               dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+               return r;
+       }
+
+       /* Reload GFX firmware */
+       r = psp_load_fw_list(&adev->psp, ucode_list, ucode_count);
+       if (r) {
+               dev_err(adev->dev, "GFX ucode load failed after reset\n");
+               return r;
+       }
+
+       /* Resume RLC, FW needs RLC alive to complete reset process */
+       adev->gfx.rlc.funcs->resume(adev);
+
+       /* Wait for FW reset event complete */
+       r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+       if (r) {
+               dev_err(adev->dev,
+                       "Failed to get response from firmware after reset\n");
+               return r;
+       }
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!(adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_GFX ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
+               r = adev->ip_blocks[i].version->funcs->resume(adev);
+               if (r) {
+                       dev_err(adev->dev,
+                               "resume of IP block <%s> failed %d\n",
+                               adev->ip_blocks[i].version->funcs->name, r);
+                       return r;
+               }
+
+               adev->ip_blocks[i].status.hw = true;
+       }
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!(adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_GFX ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_SDMA ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_COMMON))
+                       continue;
+
+               if (adev->ip_blocks[i].version->funcs->late_init) {
+                       r = adev->ip_blocks[i].version->funcs->late_init(
+                               (void *)adev);
+                       if (r) {
+                               dev_err(adev->dev,
+                                       "late_init of IP block <%s> failed %d after reset\n",
+                                       adev->ip_blocks[i].version->funcs->name,
+                                       r);
+                               return r;
+                       }
+               }
+               adev->ip_blocks[i].status.late_initialized = true;
+       }
+
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+       return r;
+}
+
+static int
+aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+                                 struct amdgpu_reset_context *reset_context)
+{
+       int r;
+       struct amdgpu_device *tmp_adev = NULL;
+
+       if (reset_context->hive == NULL) {
+               /* Wrong context, return error */
+               return -EINVAL;
+       }
+
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               dev_info(tmp_adev->dev,
+                        "GPU reset succeeded, trying to resume\n");
+               r = aldebaran_mode2_restore_ip(tmp_adev);
+               if (r)
+                       goto end;
+
+               /*
+                * Add this ASIC as tracked, as the reset has already
+                * completed successfully.
+                */
+               amdgpu_register_gpu_instance(tmp_adev);
+
+               /* Resume RAS */
+               amdgpu_ras_resume(tmp_adev);
+
+               /* Update PSP FW topology after reset */
+               if (reset_context->hive &&
+                   tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+                       r = amdgpu_xgmi_update_topology(reset_context->hive,
+                                                       tmp_adev);
+
+               if (!r) {
+                       amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+                       r = amdgpu_ib_ring_tests(tmp_adev);
+                       if (r) {
+                               dev_err(tmp_adev->dev,
+                                       "ib ring test failed (%d).\n", r);
+                               r = -EAGAIN;
+                               tmp_adev->asic_reset_res = r;
+                               goto end;
+                       }
+               }
+       }
+
+end:
+       return r;
+}
+
+static struct amdgpu_reset_handler aldebaran_mode2_handler = {
+       .reset_method           = AMD_RESET_METHOD_MODE2,
+       .prepare_env            = NULL,
+       .prepare_hwcontext      = aldebaran_mode2_prepare_hwcontext,
+       .perform_reset          = aldebaran_mode2_perform_reset,
+       .restore_hwcontext      = aldebaran_mode2_restore_hwcontext,
+       .restore_env            = NULL,
+       .do_reset               = aldebaran_mode2_reset,
+};
+
+int aldebaran_reset_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_reset_control *reset_ctl;
+
+       reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+       if (!reset_ctl)
+               return -ENOMEM;
+
+       reset_ctl->handle = adev;
+       reset_ctl->async_reset = aldebaran_async_reset;
+       reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+       reset_ctl->get_reset_handler = aldebaran_get_reset_handler;
+
+       INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+       INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+       /* Only mode2 is handled through reset control now */
+       amdgpu_reset_add_handler(reset_ctl, &aldebaran_mode2_handler);
+
+       adev->reset_cntl = reset_ctl;
+
+       return 0;
+}
+
+int aldebaran_reset_fini(struct amdgpu_device *adev)
+{
+       kfree(adev->reset_cntl);
+       adev->reset_cntl = NULL;
+       return 0;
+}
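
Aside: the handler above is not invoked directly; the new reset control dispatches it. A minimal sketch of that call sequence, using only names visible in this file — the actual dispatcher added in amdgpu_reset.c is not shown in this view:

	/* Sketch only (not a hunk from this merge): drive a mode2 reset
	 * through the reset control registered by aldebaran_reset_init().
	 * The amdgpu_reset_context fields used (method, hive) are the ones
	 * referenced above.
	 */
	static int example_mode2_reset(struct amdgpu_device *adev,
				       struct amdgpu_reset_context *ctx)
	{
		struct amdgpu_reset_handler *handler;
		int r;

		ctx->method = AMD_RESET_METHOD_MODE2;
		handler = adev->reset_cntl->get_reset_handler(adev->reset_cntl, ctx);
		if (!handler)
			return -ENOSYS;

		r = handler->prepare_hwcontext(adev->reset_cntl, ctx);
		if (r)
			return r;
		r = handler->perform_reset(adev->reset_cntl, ctx);
		if (r)
			return r;
		return handler->restore_hwcontext(adev->reset_cntl, ctx);
	}
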
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.h b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
new file mode 100644 (file)
index 0000000..a07db54
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ALDEBARAN_H__
+#define __ALDEBARAN_H__
+
+#include "amdgpu.h"
+
+int aldebaran_reset_init(struct amdgpu_device *adev);
+int aldebaran_reset_fini(struct amdgpu_device *adev);
+
+#endif
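
The init/fini pair above is expected to be wired up per ASIC by the new generic reset code. A sketch of such a dispatcher, assuming it keys on adev->asic_type like other per-ASIC setup in amdgpu — the amdgpu_reset.c added by this merge is not part of this view, so the function below is illustrative:

	/* Hypothetical dispatcher: route reset-control setup to the
	 * Aldebaran implementation, leave other ASICs on the legacy path.
	 */
	int amdgpu_reset_init(struct amdgpu_device *adev)
	{
		int ret = 0;

		switch (adev->asic_type) {
		case CHIP_ALDEBARAN:
			ret = aldebaran_reset_init(adev);
			break;
		default:
			break;
		}

		return ret;
	}
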
index a037c223c251b7ca57b026534a30a1d4c89c2af3..dc3a69296321b350cf8e954613e3f10c9c7e7c3e 100644 (file)
 #include "amdgpu_gfxhub.h"
 #include "amdgpu_df.h"
 #include "amdgpu_smuio.h"
-#include "amdgpu_hdp.h"
 
 #define MAX_GPU_INSTANCE               16
 
@@ -271,6 +270,8 @@ struct amdgpu_bo_va_mapping;
 struct amdgpu_atif;
 struct kfd_vm_fault_info;
 struct amdgpu_hive_info;
+struct amdgpu_reset_context;
+struct amdgpu_reset_control;
 
 enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -589,6 +590,7 @@ struct amdgpu_allowed_register_entry {
 };
 
 enum amd_reset_method {
+       AMD_RESET_METHOD_NONE = -1,
        AMD_RESET_METHOD_LEGACY = 0,
        AMD_RESET_METHOD_MODE0,
        AMD_RESET_METHOD_MODE1,
@@ -920,6 +922,7 @@ struct amdgpu_device {
        struct amdgpu_irq_src           pageflip_irq;
        struct amdgpu_irq_src           hpd_irq;
        struct amdgpu_irq_src           dmub_trace_irq;
+       struct amdgpu_irq_src           dmub_outbox_irq;
 
        /* rings */
        u64                             fence_context;
@@ -1030,13 +1033,9 @@ struct amdgpu_device {
 
        /* s3/s4 mask */
        bool                            in_suspend;
-       bool                            in_hibernate;
-
-       /*
-        * The combination flag in_poweroff_reboot_com used to identify the poweroff
-        * and reboot opt in the s0i3 system-wide suspend.
-        */
-       bool                            in_poweroff_reboot_com;
+       bool                            in_s3;
+       bool                            in_s4;
+       bool                            in_s0ix;
 
        atomic_t                        in_gpu_reset;
        enum pp_mp1_state               mp1_state;
@@ -1078,6 +1077,8 @@ struct amdgpu_device {
 
        bool                            in_pci_err_recovery;
        struct pci_saved_state          *pci_state;
+
+       struct amdgpu_reset_control     *reset_cntl;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1129,13 +1130,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
-                                 struct amdgpu_job *job,
-                                 bool *need_full_reset_arg);
+                                struct amdgpu_reset_context *reset_context);
 
-int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
-                         struct list_head *device_list_handle,
-                         bool *need_full_reset_arg,
-                         bool skip_hw_reset);
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+                        struct amdgpu_reset_context *reset_context);
 
 int emu_soc_asic_init(struct amdgpu_device *adev);
 
@@ -1275,8 +1273,9 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size);
 
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct drm_device *dev);
+bool amdgpu_device_supports_px(struct drm_device *dev);
 bool amdgpu_device_supports_boco(struct drm_device *dev);
 bool amdgpu_device_supports_baco(struct drm_device *dev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1390,6 +1389,13 @@ void amdgpu_pci_resume(struct pci_dev *pdev);
 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
 bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
 
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
+
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+                              enum amd_clockgating_state state);
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+                              enum amd_powergating_state state);
+
 #include "amdgpu_object.h"
 
 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
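
The in_s3/in_s4/in_s0ix split replaces the old in_hibernate/in_poweroff_reboot_com combination flags, so each suspend entry point can record exactly which system state is being entered. A sketch of how a PM callback might bracket suspend with the new flags — the amdgpu_drv.c hunks are not in this view, and amdgpu_acpi_is_s0ix_supported() is assumed from the amdgpu_device.c hunk below:

	/* Sketch: mark S0ix vs. S3 before calling the common suspend path. */
	static int amdgpu_pmops_suspend(struct device *dev)
	{
		struct drm_device *drm_dev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(drm_dev);
		int r;

		if (amdgpu_acpi_is_s0ix_supported(adev))
			adev->in_s0ix = true;
		adev->in_s3 = true;
		r = amdgpu_device_suspend(drm_dev, true);
		adev->in_s3 = false;

		return r;
	}
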
index 1c6be53313a83b711481f2251954b531e682d257..5f6696a3c778c599f52c23868546442c1687afeb 100644 (file)
@@ -246,6 +246,7 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        if (cp_mqd_gfx9)
                bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
@@ -317,6 +318,7 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
+       struct amdgpu_bo_user *ubo;
        struct amdgpu_bo_param bp;
        int r;
 
@@ -327,14 +329,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
        bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.type = ttm_bo_type_device;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
-       r = amdgpu_bo_create(adev, &bp, &bo);
+       r = amdgpu_bo_create_user(adev, &bp, &ubo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate gws BO for amdkfd (%d)\n", r);
                return r;
        }
 
+       bo = &ubo->bo;
        *mem_obj = bo;
        return 0;
 }
@@ -495,8 +499,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
                *dma_buf_kgd = (struct kgd_dev *)adev;
        if (bo_size)
                *bo_size = amdgpu_bo_size(bo);
-       if (metadata_size)
-               *metadata_size = bo->metadata_size;
        if (metadata_buffer)
                r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
                                           metadata_size, &metadata_flags);
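
The new bp.bo_ptr_size field comes from the BO struct rework: amdgpu_bo_create() now allocates a caller-specified container size, so kernel BOs pass sizeof(struct amdgpu_bo) while user-visible BOs go through amdgpu_bo_create_user() and embed the base BO as their first member (which is why &ubo->bo is valid above). The kernel-BO side of the contract, condensed from the hunks above (size is a placeholder):

	/* Plain kernel BO: the container is just struct amdgpu_bo. */
	struct amdgpu_bo_param bp;
	struct amdgpu_bo *bo;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;			/* caller-supplied allocation size */
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	bp.bo_ptr_size = sizeof(struct amdgpu_bo);

	r = amdgpu_bo_create(adev, &bp, &bo);
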
index e05648a8a14544ee575edadb0151c106c56149b6..494b2e1717d52063d9bc4843c4b3a9529a9eb8e9 100644 (file)
@@ -1232,157 +1232,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
        return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
 
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
-                                             u16 *leakage_id)
-{
-       union set_voltage args;
-       int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-       u8 frev, crev;
-
-       if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-               return -EINVAL;
-
-       switch (crev) {
-       case 3:
-       case 4:
-               args.v3.ucVoltageType = 0;
-               args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
-               args.v3.usVoltageLevel = 0;
-
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
-               *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
-               break;
-       default:
-               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
-                                                            u16 *vddc, u16 *vddci,
-                                                            u16 virtual_voltage_id,
-                                                            u16 vbios_voltage_id)
-{
-       int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
-       u8 frev, crev;
-       u16 data_offset, size;
-       int i, j;
-       ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
-       u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
-
-       *vddc = 0;
-       *vddci = 0;
-
-       if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset))
-               return -EINVAL;
-
-       profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
-               (adev->mode_info.atom_context->bios + data_offset);
-
-       switch (frev) {
-       case 1:
-               return -EINVAL;
-       case 2:
-               switch (crev) {
-               case 1:
-                       if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
-                               return -EINVAL;
-                       leakage_bin = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usLeakageBinArrayOffset));
-                       vddc_id_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
-                       vddc_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
-                       vddci_id_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
-                       vddci_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
-
-                       if (profile->ucElbVDDC_Num > 0) {
-                               for (i = 0; i < profile->ucElbVDDC_Num; i++) {
-                                       if (vddc_id_buf[i] == virtual_voltage_id) {
-                                               for (j = 0; j < profile->ucLeakageBinNum; j++) {
-                                                       if (vbios_voltage_id <= leakage_bin[j]) {
-                                                               *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
-                                                               break;
-                                                       }
-                                               }
-                                               break;
-                                       }
-                               }
-                       }
-                       if (profile->ucElbVDDCI_Num > 0) {
-                               for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
-                                       if (vddci_id_buf[i] == virtual_voltage_id) {
-                                               for (j = 0; j < profile->ucLeakageBinNum; j++) {
-                                                       if (vbios_voltage_id <= leakage_bin[j]) {
-                                                               *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
-                                                               break;
-                                                       }
-                                               }
-                                               break;
-                                       }
-                               }
-                       }
-                       break;
-               default:
-                       DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-                       return -EINVAL;
-               }
-               break;
-       default:
-               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-union get_voltage_info {
-       struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
-       struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
-};
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
-                                   u16 virtual_voltage_id,
-                                   u16 *voltage)
-{
-       int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
-       u32 entry_id;
-       u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
-       union get_voltage_info args;
-
-       for (entry_id = 0; entry_id < count; entry_id++) {
-               if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
-                   virtual_voltage_id)
-                       break;
-       }
-
-       if (entry_id >= count)
-               return -EINVAL;
-
-       args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
-       args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
-       args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
-       args.in.ulSCLKFreq =
-               cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
-
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
-       *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
-
-       return 0;
-}
-
 union voltage_object_info {
        struct _ATOM_VOLTAGE_OBJECT_INFO v1;
        struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
@@ -1913,7 +1762,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct atom_context *ctx = adev->mode_info.atom_context;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+       return sysfs_emit(buf, "%s\n", ctx->vbios_version);
 }
 
 static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
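
The snprintf()-to-sysfs_emit() conversions in this file (and in amdgpu_device.c below) follow the sysfs contract: show() callbacks receive a page-aligned, PAGE_SIZE buffer, and sysfs_emit() enforces both instead of each caller hard-coding PAGE_SIZE. Reduced to a minimal sketch (example_show is a hypothetical name):

	/* show() callback after conversion: sysfs_emit() bounds the write
	 * to PAGE_SIZE and warns if the buffer is not page-aligned.
	 */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct drm_device *ddev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(ddev);

		return sysfs_emit(buf, "%s\n", adev->product_name);
	}
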
index 1321ec09c734cf769ee18fa9b035e9b8b67c27ce..8cc0222dba1910e1569a88d4e8d4313c3420b76c 100644 (file)
@@ -168,18 +168,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
                                             u32 eng_clock, u32 mem_clock);
 
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
-                                             u16 *leakage_id);
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
-                                                            u16 *vddc, u16 *vddci,
-                                                            u16 virtual_voltage_id,
-                                                            u16 vbios_voltage_id);
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
-                                   u16 virtual_voltage_id,
-                                   u16 *voltage);
-
 bool
 amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
                                u8 voltage_type, u8 voltage_mode);
index d9b35df33806d178afbea6d44954396121bee021..313517f7cf107c9a8ad6deb7e9bfa0f672cab5ed 100644 (file)
@@ -85,6 +85,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
        bp.flags = 0;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
        n = AMDGPU_BENCHMARK_ITERATIONS;
        r = amdgpu_bo_create(adev, &bp, &sobj);
        if (r) {
index 0f82c5d212372354a2de059509f478e67c7c3fd7..b4ad1c055c702058fe92055f31c4cc2a924c10dc 100644 (file)
@@ -65,6 +65,7 @@
 #include "amdgpu_ras.h"
 #include "amdgpu_pmu.h"
 #include "amdgpu_fru_eeprom.h"
+#include "amdgpu_reset.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
@@ -137,7 +138,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
+       return sysfs_emit(buf, "%llu\n", cnt);
 }
 
 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
@@ -161,7 +162,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+       return sysfs_emit(buf, "%s\n", adev->product_name);
 }
 
 static DEVICE_ATTR(product_name, S_IRUGO,
@@ -183,7 +184,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+       return sysfs_emit(buf, "%s\n", adev->product_number);
 }
 
 static DEVICE_ATTR(product_number, S_IRUGO,
@@ -205,25 +206,25 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+       return sysfs_emit(buf, "%s\n", adev->serial);
 }
 
 static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);
 
 /**
- * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
+ * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
  *
  * @dev: drm_device pointer
  *
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ATPX power control,
  * otherwise return false.
  */
-bool amdgpu_device_supports_atpx(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct drm_device *dev)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
 
-       if (adev->flags & AMD_IS_PX)
+       if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
 }
@@ -233,14 +234,15 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev)
  *
  * @dev: drm_device pointer
  *
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ACPI power control,
  * otherwise return false.
  */
 bool amdgpu_device_supports_boco(struct drm_device *dev)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
 
-       if (adev->has_pr3)
+       if (adev->has_pr3 ||
+           ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
 }
@@ -326,6 +328,35 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 /*
  * register access helper functions.
  */
+
+/* Check if hw access should be skipped because of hotplug or device error */
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
+{
+       if (adev->in_pci_err_recovery)
+               return true;
+
+#ifdef CONFIG_LOCKDEP
+       /*
+        * This is a bit complicated to understand, so worth a comment. What we assert
+        * here is that the GPU reset is not running on another thread in parallel.
+        *
+        * For this we trylock the read side of the reset semaphore, if that succeeds
+        * we know that the reset is not running in parallel.
+        *
+        * If the trylock fails we assert that we are either already holding the read
+        * side of the lock or are the reset thread itself and hold the write side of
+        * the lock.
+        */
+       if (in_task()) {
+               if (down_read_trylock(&adev->reset_sem))
+                       up_read(&adev->reset_sem);
+               else
+                       lockdep_assert_held(&adev->reset_sem);
+       }
+#endif
+       return false;
+}
+
 /**
  * amdgpu_device_rreg - read a memory mapped IO or indirect register
  *
@@ -340,7 +371,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 {
        uint32_t ret;
 
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if ((reg * 4) < adev->rmmio_size) {
@@ -377,7 +408,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
  */
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if (offset < adev->rmmio_size)
@@ -402,7 +433,7 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
  */
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (offset < adev->rmmio_size)
@@ -425,7 +456,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if ((reg * 4) < adev->rmmio_size) {
@@ -452,14 +483,14 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-                       return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+                       return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
@@ -476,7 +507,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
  */
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -499,7 +530,7 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -520,7 +551,7 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
  */
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -543,7 +574,7 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -1391,7 +1422,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;
 
-       if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
+       if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;
 
        if (state == VGA_SWITCHEROO_ON) {
@@ -2049,6 +2080,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
                                return r;
                        }
+
+                       /* get pf2vf msg info as early as possible */
+                       if (amdgpu_sriov_vf(adev))
+                               amdgpu_virt_init_data_exchange(adev);
+
                }
        }
 
@@ -2331,8 +2367,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
  * Returns 0 on success, negative error code on failure.
  */
 
-static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
-                                               enum amd_clockgating_state state)
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+                              enum amd_clockgating_state state)
 {
        int i, j, r;
 
@@ -2343,6 +2379,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
                i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
+               /* skip CG for GFX on S0ix */
+               if (adev->in_s0ix &&
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                       continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2363,7 +2403,8 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
        return 0;
 }
 
-static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+                              enum amd_powergating_state state)
 {
        int i, j, r;
 
@@ -2374,6 +2415,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
                i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
+               /* skip PG for GFX on S0ix */
+               if (adev->in_s0ix &&
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                       continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
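Both setters lose their static qualifier here, which suggests reset code outside amdgpu_device.c now calls them directly. A hypothetical external caller, bracketing a reset with an ungate/gate pair (the ordering shown is illustrative, not taken from this patch):

    amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
    amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
    /* ... perform the ASIC reset ... */
    amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
    amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);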
@@ -2655,11 +2700,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
        int i, r;
 
-       if (adev->in_poweroff_reboot_com ||
-           !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
-               amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-               amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-       }
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
@@ -2699,6 +2741,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
        int i, r;
 
+       if (adev->in_s0ix)
+               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -2721,6 +2766,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
                        adev->ip_blocks[i].status.hw = false;
                        continue;
                }
+
+               /* skip suspend of gfx and psp for S0ix
+                * gfx is in gfxoff state, so on resume it will exit gfxoff just
+                * like at runtime. PSP is also part of the always-on hardware
+                * so there is no need to suspend it.
+                */
+               if (adev->in_s0ix &&
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+                       continue;
+
                /* XXX handle errors */
                r = adev->ip_blocks[i].version->funcs->suspend(adev);
                /* XXX handle errors */
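The D3Entry notification added at the top of phase 2 is paired with a D0Entry notification in amdgpu_device_resume() (see the resume hunk below), so the GFX state change brackets the whole S0ix cycle; both halves are taken verbatim from this patch:

    /* suspend, phase 2 */
    if (adev->in_s0ix)
            amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);

    /* resume */
    if (adev->in_s0ix)
            amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);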
@@ -3086,8 +3142,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
                if (adev->asic_reset_res)
                        goto fail;
 
-               if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
-                       adev->mmhub.funcs->reset_ras_error_count(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->reset_ras_error_count)
+                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
        } else {
 
                task_barrier_full(&hive->tb);
@@ -3197,7 +3254,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        struct drm_device *ddev = adev_to_drm(adev);
        struct pci_dev *pdev = adev->pdev;
        int r, i;
-       bool atpx = false;
+       bool px = false;
        u32 max_MBps;
 
        adev->shutdown = false;
@@ -3359,16 +3416,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
                vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
 
-       if (amdgpu_device_supports_atpx(ddev))
-               atpx = true;
-       if (amdgpu_has_atpx() &&
-           (amdgpu_is_atpx_hybrid() ||
-            amdgpu_has_atpx_dgpu_power_cntl()) &&
-           !pci_is_thunderbolt_attached(adev->pdev))
+       if (amdgpu_device_supports_px(ddev)) {
+               px = true;
                vga_switcheroo_register_client(adev->pdev,
-                                              &amdgpu_switcheroo_ops, atpx);
-       if (atpx)
+                                              &amdgpu_switcheroo_ops, px);
                vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+       }
 
        if (amdgpu_emu_mode == 1) {
                /* post the asic on emulation mode */
@@ -3376,6 +3429,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                goto fence_driver_init;
        }
 
+       amdgpu_reset_init(adev);
+
        /* detect if we are with an SRIOV vbios */
        amdgpu_device_detect_sriov_bios(adev);
 
@@ -3575,7 +3630,7 @@ release_ras_con:
 
 failed:
        amdgpu_vf_error_trans_all(adev);
-       if (atpx)
+       if (px)
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
 
 failed_unmap:
@@ -3626,6 +3681,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        release_firmware(adev->firmware.gpu_info_fw);
        adev->firmware.gpu_info_fw = NULL;
        adev->accel_working = false;
+
+       amdgpu_reset_fini(adev);
+
        /* free i2c buses */
        if (!amdgpu_device_has_dc_support(adev))
                amdgpu_i2c_fini(adev);
@@ -3635,13 +3693,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        kfree(adev->bios);
        adev->bios = NULL;
-       if (amdgpu_has_atpx() &&
-           (amdgpu_is_atpx_hybrid() ||
-            amdgpu_has_atpx_dgpu_power_cntl()) &&
-           !pci_is_thunderbolt_attached(adev->pdev))
+       if (amdgpu_device_supports_px(adev_to_drm(adev))) {
                vga_switcheroo_unregister_client(adev->pdev);
-       if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
+       }
        if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
                vga_client_register(adev->pdev, NULL, NULL, NULL);
        iounmap(adev->rmmio);
@@ -3674,14 +3729,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  */
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
-       struct amdgpu_device *adev;
-       struct drm_crtc *crtc;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter iter;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        int r;
 
-       adev = drm_to_adev(dev);
-
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
@@ -3693,61 +3743,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        cancel_delayed_work_sync(&adev->delayed_init_work);
 
-       if (!amdgpu_device_has_dc_support(adev)) {
-               /* turn off display hw */
-               drm_modeset_lock_all(dev);
-               drm_connector_list_iter_begin(dev, &iter);
-               drm_for_each_connector_iter(connector, &iter)
-                       drm_helper_connector_dpms(connector,
-                                                 DRM_MODE_DPMS_OFF);
-               drm_connector_list_iter_end(&iter);
-               drm_modeset_unlock_all(dev);
-                       /* unpin the front buffers and cursors */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-                       struct drm_framebuffer *fb = crtc->primary->fb;
-                       struct amdgpu_bo *robj;
-
-                       if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                               r = amdgpu_bo_reserve(aobj, true);
-                               if (r == 0) {
-                                       amdgpu_bo_unpin(aobj);
-                                       amdgpu_bo_unreserve(aobj);
-                               }
-                       }
-
-                       if (fb == NULL || fb->obj[0] == NULL) {
-                               continue;
-                       }
-                       robj = gem_to_amdgpu_bo(fb->obj[0]);
-                       /* don't unpin kernel fb objects */
-                       if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-                               r = amdgpu_bo_reserve(robj, true);
-                               if (r == 0) {
-                                       amdgpu_bo_unpin(robj);
-                                       amdgpu_bo_unreserve(robj);
-                               }
-                       }
-               }
-       }
-
        amdgpu_ras_suspend(adev);
 
        r = amdgpu_device_ip_suspend_phase1(adev);
 
-       amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+       if (!adev->in_s0ix)
+               amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
        amdgpu_fence_driver_suspend(adev);
 
-       if (adev->in_poweroff_reboot_com ||
-           !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
-               r = amdgpu_device_ip_suspend_phase2(adev);
-       else
-               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+       r = amdgpu_device_ip_suspend_phase2(adev);
        /* evict remaining vram memory
         * This second call to evict vram is to evict the gart page table
         * using the CPU.
@@ -3769,16 +3777,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
  */
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
-       struct drm_connector *connector;
-       struct drm_connector_list_iter iter;
        struct amdgpu_device *adev = drm_to_adev(dev);
-       struct drm_crtc *crtc;
        int r = 0;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       if (amdgpu_acpi_is_s0ix_supported(adev))
+       if (adev->in_s0ix)
                amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
 
        /* post card */
@@ -3803,50 +3808,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
-       if (!amdgpu_device_has_dc_support(adev)) {
-               /* pin cursors */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-                       if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                               r = amdgpu_bo_reserve(aobj, true);
-                               if (r == 0) {
-                                       r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-                                       if (r != 0)
-                                               dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
-                                       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-                                       amdgpu_bo_unreserve(aobj);
-                               }
-                       }
-               }
+       if (!adev->in_s0ix) {
+               r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+               if (r)
+                       return r;
        }
-       r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
-       if (r)
-               return r;
 
        /* Make sure IB tests flushed */
        flush_delayed_work(&adev->delayed_init_work);
 
-       /* blat the mode back in */
-       if (fbcon) {
-               if (!amdgpu_device_has_dc_support(adev)) {
-                       /* pre DCE11 */
-                       drm_helper_resume_force_mode(dev);
-
-                       /* turn on display hw */
-                       drm_modeset_lock_all(dev);
-
-                       drm_connector_list_iter_begin(dev, &iter);
-                       drm_for_each_connector_iter(connector, &iter)
-                               drm_helper_connector_dpms(connector,
-                                                         DRM_MODE_DPMS_ON);
-                       drm_connector_list_iter_end(&iter);
-
-                       drm_modeset_unlock_all(dev);
-               }
+       if (fbcon)
                amdgpu_fbdev_set_suspend(adev, 0);
-       }
 
        drm_kms_helper_poll_enable(dev);
 
@@ -4144,11 +4116,11 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
        amdgpu_amdkfd_post_reset(adev);
 
 error:
-       amdgpu_virt_release_full_gpu(adev, true);
        if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
                amdgpu_inc_vram_lost(adev);
                r = amdgpu_device_recover_vram(adev);
        }
+       amdgpu_virt_release_full_gpu(adev, true);
 
        return r;
 }
@@ -4225,6 +4197,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                case CHIP_SIENNA_CICHLID:
                case CHIP_NAVY_FLOUNDER:
                case CHIP_DIMGREY_CAVEFISH:
+               case CHIP_VANGOGH:
+               case CHIP_ALDEBARAN:
                        break;
                default:
                        goto disabled;
@@ -4279,11 +4253,15 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 }
 
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
-                                 struct amdgpu_job *job,
-                                 bool *need_full_reset_arg)
+                                struct amdgpu_reset_context *reset_context)
 {
        int i, r = 0;
-       bool need_full_reset  = *need_full_reset_arg;
+       struct amdgpu_job *job = NULL;
+       bool need_full_reset =
+               test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+       if (reset_context->reset_req_dev == adev)
+               job = reset_context->job;
 
        /* no need to dump if device is not in good state during probe period */
        if (!adev->gmc.xgmi.pending_reset)
@@ -4308,6 +4286,13 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        if(job)
                drm_sched_increase_karma(&job->base);
 
+       r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+       /* If reset handler not implemented, continue; otherwise return */
+       if (r == -ENOSYS)
+               r = 0;
+       else
+               return r;
+
        /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
        if (!amdgpu_sriov_vf(adev)) {
 
@@ -4326,22 +4311,38 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
                if (need_full_reset)
                        r = amdgpu_device_ip_suspend(adev);
-
-               *need_full_reset_arg = need_full_reset;
+               if (need_full_reset)
+                       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+               else
+                       clear_bit(AMDGPU_NEED_FULL_RESET,
+                                 &reset_context->flags);
        }
 
        return r;
 }
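The function above and amdgpu_do_asic_reset() below share one dispatch convention for the new reset-handler framework: try the per-ASIC handler first, treat -ENOSYS as "no handler registered, fall through to the legacy flow", and propagate any other result. Reduced to its skeleton:

    r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
    if (r == -ENOSYS)
            r = 0;          /* no handler: continue with the legacy path */
    else
            return r;       /* a handler ran (or failed): nothing more to do */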
 
-int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
-                         struct list_head *device_list_handle,
-                         bool *need_full_reset_arg,
-                         bool skip_hw_reset)
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+                        struct amdgpu_reset_context *reset_context)
 {
        struct amdgpu_device *tmp_adev = NULL;
-       bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+       bool need_full_reset, skip_hw_reset, vram_lost = false;
        int r = 0;
 
+       /* Try reset handler method first */
+       tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+                                   reset_list);
+       r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+       /* If reset handler not implemented, continue; otherwise return */
+       if (r == -ENOSYS)
+               r = 0;
+       else
+               return r;
+
+       /* Reset handler not implemented, use the default method */
+       need_full_reset =
+               test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+       skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
        /*
         * ASIC reset has to be done on all XGMI hive nodes ASAP
         * to allow proper links negotiation in FW (within 1 sec)
@@ -4378,9 +4379,9 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 
        if (!r && amdgpu_ras_intr_triggered()) {
                list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-                       if (tmp_adev->mmhub.funcs &&
-                           tmp_adev->mmhub.funcs->reset_ras_error_count)
-                               tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+                       if (tmp_adev->mmhub.ras_funcs &&
+                           tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
+                               tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
                }
 
                amdgpu_ras_intr_cleared();
@@ -4425,7 +4426,8 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                 */
                                amdgpu_register_gpu_instance(tmp_adev);
 
-                               if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+                               if (!reset_context->hive &&
+                                   tmp_adev->gmc.xgmi.num_physical_nodes > 1)
                                        amdgpu_xgmi_add_device(tmp_adev);
 
                                r = amdgpu_device_ip_late_init(tmp_adev);
@@ -4453,8 +4455,10 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                }
 
                                /* Update PSP FW topology after reset */
-                               if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
-                                       r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+                               if (reset_context->hive &&
+                                   tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+                                       r = amdgpu_xgmi_update_topology(
+                                               reset_context->hive, tmp_adev);
                        }
                }
 
@@ -4478,7 +4482,10 @@ out:
        }
 
 end:
-       *need_full_reset_arg = need_full_reset;
+       if (need_full_reset)
+               set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+       else
+               clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
        return r;
 }
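reset_context.flags takes over the role of the old need_full_reset in/out bool pointer: the caller seeds the bit, and amdgpu_device_pre_asic_reset()/amdgpu_do_asic_reset() read it with test_bit() and publish the updated answer with set_bit()/clear_bit(). The round trip in sketch form (all names from this patch):

    /* caller: seed the request */
    clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

    /* callee: read the request ... */
    need_full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
    /* ... and write the possibly updated answer back */
    if (need_full_reset)
            set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
    else
            clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);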
 
@@ -4615,6 +4622,74 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
        return 0;
 }
 
+void amdgpu_device_recheck_guilty_jobs(struct amdgpu_device *adev,
+                                      struct list_head *device_list_handle,
+                                      struct amdgpu_reset_context *reset_context)
+{
+       int i, r = 0;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+               int ret = 0;
+               struct drm_sched_job *s_job;
+
+               if (!ring || !ring->sched.thread)
+                       continue;
+
+               s_job = list_first_entry_or_null(&ring->sched.pending_list,
+                               struct drm_sched_job, list);
+               if (s_job == NULL)
+                       continue;
+
+               /* clear the job's guilty flag and rely on the following step to decide the real one */
+               drm_sched_reset_karma(s_job);
+               drm_sched_resubmit_jobs_ext(&ring->sched, 1);
+
+               ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
+               if (ret == 0) { /* timeout */
+                       DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
+                                               ring->sched.name, s_job->id);
+
+                       /* set guilty */
+                       drm_sched_increase_karma(s_job);
+retry:
+                       /* do hw reset */
+                       if (amdgpu_sriov_vf(adev)) {
+                               amdgpu_virt_fini_data_exchange(adev);
+                               r = amdgpu_device_reset_sriov(adev, false);
+                               if (r)
+                                       adev->asic_reset_res = r;
+                       } else {
+                               clear_bit(AMDGPU_SKIP_HW_RESET,
+                                         &reset_context->flags);
+                               r = amdgpu_do_asic_reset(device_list_handle,
+                                                        reset_context);
+                               if (r && r == -EAGAIN)
+                                       goto retry;
+                       }
+
+                       /*
+                        * bump the reset counter so that the following
+                        * resubmitted jobs can flush the VMID
+                        */
+                       atomic_inc(&adev->gpu_reset_counter);
+                       continue;
+               }
+
+               /* got the hw fence, signal finished fence */
+               atomic_dec(ring->sched.score);
+               dma_fence_get(&s_job->s_fence->finished);
+               dma_fence_signal(&s_job->s_fence->finished);
+               dma_fence_put(&s_job->s_fence->finished);
+
+               /* remove node from list and free the job */
+               spin_lock(&ring->sched.job_list_lock);
+               list_del_init(&s_job->list);
+               spin_unlock(&ring->sched.job_list_lock);
+               ring->sched.ops->free_job(s_job);
+       }
+}
+
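The recheck loop keys off the return convention of dma_fence_wait_timeout(): zero means the timeout expired (the resubmitted job never signaled, so it is the guilty one), a positive value is the remaining timeout on success, and a negative value is an error. The decision, restated compactly:

    ret = dma_fence_wait_timeout(s_job->s_fence->parent, false,
                                 ring->sched.timeout);
    if (ret == 0)
            drm_sched_increase_karma(s_job);        /* timed out: really guilty */
    else if (ret > 0)
            dma_fence_signal(&s_job->s_fence->finished);    /* innocent, complete it */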
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -4630,13 +4705,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
-       bool need_full_reset = false;
        bool job_signaled = false;
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
        bool need_emergency_restart = false;
        bool audio_suspended = false;
+       int tmp_vram_lost_counter;
+       struct amdgpu_reset_context reset_context;
+
+       memset(&reset_context, 0, sizeof(reset_context));
 
        /*
         * Special case: RAS triggered and full reset isn't supported
@@ -4677,6 +4755,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                mutex_lock(&hive->hive_lock);
        }
 
+       reset_context.method = AMD_RESET_METHOD_NONE;
+       reset_context.reset_req_dev = adev;
+       reset_context.job = job;
+       reset_context.hive = hive;
+       clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
        /*
         * lock the device before we try to operate the linked list
         * if didn't get the device lock, don't touch the linked list since
@@ -4777,9 +4861,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-               r = amdgpu_device_pre_asic_reset(tmp_adev,
-                                                (tmp_adev == adev) ? job : NULL,
-                                                &need_full_reset);
+               r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
                /*TODO Should we stop ?*/
                if (r) {
                        dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -4788,6 +4870,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                }
        }
 
+       tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
        /* Actual ASIC resets if needed.*/
        /* TODO Implement XGMI hive reset logic for SRIOV */
        if (amdgpu_sriov_vf(adev)) {
@@ -4795,7 +4878,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                if (r)
                        adev->asic_reset_res = r;
        } else {
-               r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
+               r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
                if (r && r == -EAGAIN)
                        goto retry;
        }
@@ -4805,6 +4888,18 @@ skip_hw_reset:
        /* Post ASIC reset for all devs. */
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 
+               /*
+                * Sometimes a later bad compute job can block a good gfx job
+                * because the gfx and compute rings share internal GC HW. Add
+                * an additional guilty-job recheck step to find the real
+                * guilty job: it synchronously resubmits jobs one at a time
+                * and waits for the first one to signal. If that wait times
+                * out, the job is identified as the real guilty one.
+                */
+               if (amdgpu_gpu_recovery == 2 &&
+                   tmp_vram_lost_counter >= atomic_read(&adev->vram_lost_counter))
+                       amdgpu_device_recheck_guilty_jobs(
+                               tmp_adev, device_list_handle, &reset_context);
+
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
@@ -5148,12 +5243,14 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        int r, i;
-       bool need_full_reset = true;
+       struct amdgpu_reset_context reset_context;
        u32 memsize;
        struct list_head device_list;
 
        DRM_INFO("PCI error: slot reset callback!!\n");
 
+       memset(&reset_context, 0, sizeof(reset_context));
+
        INIT_LIST_HEAD(&device_list);
        list_add_tail(&adev->reset_list, &device_list);
 
@@ -5176,13 +5273,18 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
                goto out;
        }
 
+       reset_context.method = AMD_RESET_METHOD_NONE;
+       reset_context.reset_req_dev = adev;
+       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+       set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+
        adev->in_pci_err_recovery = true;
-       r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+       r = amdgpu_device_pre_asic_reset(adev, &reset_context);
        adev->in_pci_err_recovery = false;
        if (r)
                goto out;
 
-       r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+       r = amdgpu_do_asic_reset(&device_list, &reset_context);
 
 out:
        if (!r) {
index b05301e1815c8c2f22381eb57642022045235514..9a2f811450edcbcca68e035d2e616161724cffad 100644 (file)
@@ -1354,3 +1354,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
        return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
                                                  stime, etime, mode);
 }
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
+       int r;
+
+       /* turn off display hw */
+       drm_modeset_lock_all(dev);
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter)
+               drm_helper_connector_dpms(connector,
+                                         DRM_MODE_DPMS_OFF);
+       drm_connector_list_iter_end(&iter);
+       drm_modeset_unlock_all(dev);
+       /* unpin the front buffers and cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+               struct drm_framebuffer *fb = crtc->primary->fb;
+               struct amdgpu_bo *robj;
+
+               if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+
+               if (fb == NULL || fb->obj[0] == NULL) {
+                       continue;
+               }
+               robj = gem_to_amdgpu_bo(fb->obj[0]);
+               /* don't unpin kernel fb objects */
+               if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+                       r = amdgpu_bo_reserve(robj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(robj);
+                               amdgpu_bo_unreserve(robj);
+                       }
+               }
+       }
+       return r;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
+       struct drm_crtc *crtc;
+       int r;
+
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+               if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, true);
+                       if (r == 0) {
+                               r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+                               if (r != 0)
+                                       dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+                               amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+       }
+
+       drm_helper_resume_force_mode(dev);
+
+       /* turn on display hw */
+       drm_modeset_lock_all(dev);
+
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter)
+               drm_helper_connector_dpms(connector,
+                                         DRM_MODE_DPMS_ON);
+       drm_connector_list_iter_end(&iter);
+
+       drm_modeset_unlock_all(dev);
+
+       return 0;
+}
+
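These helpers carry the logic that previously lived inline in amdgpu_device_suspend()/amdgpu_device_resume(); display IP implementations can now invoke them from their own hooks. A hypothetical caller (only the helper names come from this patch; the surrounding function is illustrative):

    static int dce_vX_0_suspend(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;
            int r;

            r = amdgpu_display_suspend_helper(adev);
            if (r)
                    return r;
            /* ... existing display hw fini ... */
            return 0;
    }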
index dc7b7d1165493c249fa7cb55b6d42d70ade8b0ed..7b6d83e2b13ca4bbedee70a12b74a56f087ce647 100644 (file)
@@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 const struct drm_format_info *
 amdgpu_lookup_format_info(u32 format, uint64_t modifier);
 
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
 #endif
index 33991b4a5627e37842bbd63e29395acb81587b7d..d8f131ed10cb530e2c590ea1def66569aeb34dff 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
+#include <linux/suspend.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -46,6 +47,7 @@
 
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_reset.h"
 
 /*
  * KMS wrapper.
@@ -515,7 +517,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
  * DOC: gpu_recovery (int)
 * Set to enable GPU recovery mechanism (2 = advanced tdr mode, 1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
  */
-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
 module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
 
 /**
@@ -1161,6 +1163,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 
        /* Van Gogh */
@@ -1333,9 +1336,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
         */
        if (!amdgpu_passthrough(adev))
                adev->mp1_state = PP_MP1_STATE_UNLOAD;
-       adev->in_poweroff_reboot_com = true;
        amdgpu_device_ip_suspend(adev);
-       adev->in_poweroff_reboot_com = false;
        adev->mp1_state = PP_MP1_STATE_NONE;
 }
 
@@ -1349,7 +1350,9 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        struct list_head device_list;
        struct amdgpu_device *adev;
        int i, r;
-       bool need_full_reset = true;
+       struct amdgpu_reset_context reset_context;
+
+       memset(&reset_context, 0, sizeof(reset_context));
 
        mutex_lock(&mgpu_info.mutex);
        if (mgpu_info.pending_reset == true) {
@@ -1359,9 +1362,14 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        mgpu_info.pending_reset = true;
        mutex_unlock(&mgpu_info.mutex);
 
+       /* Use a common context; we just need to make sure a full reset is done */
+       reset_context.method = AMD_RESET_METHOD_NONE;
+       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
        for (i = 0; i < mgpu_info.num_dgpu; i++) {
                adev = mgpu_info.gpu_ins[i].adev;
-               r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+               reset_context.reset_req_dev = adev;
+               r = amdgpu_device_pre_asic_reset(adev, &reset_context);
                if (r) {
                        dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
                                r, adev_to_drm(adev)->unique);
@@ -1388,7 +1396,10 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        list_for_each_entry(adev, &device_list, reset_list)
                amdgpu_unregister_gpu_instance(adev);
 
-       r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+       /* Use a common context; we just need to make sure a full reset is done */
+       set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+       r = amdgpu_do_asic_reset(&device_list, &reset_context);
+
        if (r) {
                DRM_ERROR("reinit gpus failure");
                return;
@@ -1402,18 +1413,50 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        return;
 }
 
+static int amdgpu_pmops_prepare(struct device *dev)
+{
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+       /* Return a positive number here so
+        * DPM_FLAG_SMART_SUSPEND works properly
+        */
+       if (amdgpu_device_supports_boco(drm_dev))
+               return pm_runtime_suspended(dev) &&
+                       pm_suspend_via_firmware();
+
+       return 0;
+}
+
+static void amdgpu_pmops_complete(struct device *dev)
+{
+       /* nothing to do */
+}
+
 static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+       int r;
 
-       return amdgpu_device_suspend(drm_dev, true);
+       if (amdgpu_acpi_is_s0ix_supported(adev))
+               adev->in_s0ix = true;
+       adev->in_s3 = true;
+       r = amdgpu_device_suspend(drm_dev, true);
+       adev->in_s3 = false;
+
+       return r;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+       int r;
 
-       return amdgpu_device_resume(drm_dev, true);
+       r = amdgpu_device_resume(drm_dev, true);
+       if (amdgpu_acpi_is_s0ix_supported(adev))
+               adev->in_s0ix = false;
+       return r;
 }
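For context on the positive return in amdgpu_pmops_prepare() above: the PM core treats a positive value from a ->prepare() callback as permission to use the direct-complete path, leaving an already runtime-suspended device suspended across the system transition; that is what makes DPM_FLAG_SMART_SUSPEND effective. The contract, summarized (a reading of the generic dev_pm_ops API, not code from this patch):

    /* dev_pm_ops.prepare() return value:
     *   < 0  -- error, abort the system suspend
     *  == 0  -- run the normal suspend/resume callbacks
     *   > 0  -- device may stay runtime-suspended (direct-complete)
     */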
 
 static int amdgpu_pmops_freeze(struct device *dev)
@@ -1422,9 +1465,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
        int r;
 
-       adev->in_hibernate = true;
+       adev->in_s4 = true;
        r = amdgpu_device_suspend(drm_dev, true);
-       adev->in_hibernate = false;
+       adev->in_s4 = false;
        if (r)
                return r;
        return amdgpu_asic_reset(adev);
@@ -1440,13 +1483,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(drm_dev);
-       int r;
 
-       adev->in_poweroff_reboot_com = true;
-       r =  amdgpu_device_suspend(drm_dev, true);
-       adev->in_poweroff_reboot_com = false;
-       return r;
+       return amdgpu_device_suspend(drm_dev, true);
 }
 
 static int amdgpu_pmops_restore(struct device *dev)
@@ -1479,7 +1517,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        }
 
        adev->in_runpm = true;
-       if (amdgpu_device_supports_atpx(drm_dev))
+       if (amdgpu_device_supports_px(drm_dev))
                drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
        ret = amdgpu_device_suspend(drm_dev, false);
@@ -1488,16 +1526,14 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                return ret;
        }
 
-       if (amdgpu_device_supports_atpx(drm_dev)) {
+       if (amdgpu_device_supports_px(drm_dev)) {
                /* Only need to handle PCI state in the driver for ATPX
                 * PCI core handles it for _PR3.
                 */
-               if (!amdgpu_is_atpx_hybrid()) {
-                       amdgpu_device_cache_pci_state(pdev);
-                       pci_disable_device(pdev);
-                       pci_ignore_hotplug(pdev);
-                       pci_set_power_state(pdev, PCI_D3cold);
-               }
+               amdgpu_device_cache_pci_state(pdev);
+               pci_disable_device(pdev);
+               pci_ignore_hotplug(pdev);
+               pci_set_power_state(pdev, PCI_D3cold);
                drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
        } else if (amdgpu_device_supports_baco(drm_dev)) {
                amdgpu_device_baco_enter(drm_dev);
@@ -1516,19 +1552,17 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
        if (!adev->runpm)
                return -EINVAL;
 
-       if (amdgpu_device_supports_atpx(drm_dev)) {
+       if (amdgpu_device_supports_px(drm_dev)) {
                drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
                /* Only need to handle PCI state in the driver for ATPX
                 * PCI core handles it for _PR3.
                 */
-               if (!amdgpu_is_atpx_hybrid()) {
-                       pci_set_power_state(pdev, PCI_D0);
-                       amdgpu_device_load_pci_state(pdev);
-                       ret = pci_enable_device(pdev);
-                       if (ret)
-                               return ret;
-               }
+               pci_set_power_state(pdev, PCI_D0);
+               amdgpu_device_load_pci_state(pdev);
+               ret = pci_enable_device(pdev);
+               if (ret)
+                       return ret;
                pci_set_master(pdev);
        } else if (amdgpu_device_supports_boco(drm_dev)) {
                /* Only need to handle PCI state in the driver for ATPX
@@ -1539,7 +1573,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
                amdgpu_device_baco_exit(drm_dev);
        }
        ret = amdgpu_device_resume(drm_dev, false);
-       if (amdgpu_device_supports_atpx(drm_dev))
+       if (amdgpu_device_supports_px(drm_dev))
                drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
        adev->in_runpm = false;
        return 0;
@@ -1620,6 +1654,8 @@ out:
 }
 
 static const struct dev_pm_ops amdgpu_pm_ops = {
+       .prepare = amdgpu_pmops_prepare,
+       .complete = amdgpu_pmops_complete,
        .suspend = amdgpu_pmops_suspend,
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
index 1a4809d9e85022681771065da141736069a12c7b..47ea468596184509b4174db02437993f2ca93b33 100644 (file)
@@ -439,7 +439,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * Helper function for amdgpu_fence_driver_init().
  */
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-                                 unsigned num_hw_submission)
+                                 unsigned num_hw_submission,
+                                 atomic_t *sched_score)
 {
        struct amdgpu_device *adev = ring->adev;
        long timeout;
@@ -467,30 +468,31 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                return -ENOMEM;
 
        /* No need to setup the GPU scheduler for rings that don't need it */
-       if (!ring->no_scheduler) {
-               switch (ring->funcs->type) {
-               case AMDGPU_RING_TYPE_GFX:
-                       timeout = adev->gfx_timeout;
-                       break;
-               case AMDGPU_RING_TYPE_COMPUTE:
-                       timeout = adev->compute_timeout;
-                       break;
-               case AMDGPU_RING_TYPE_SDMA:
-                       timeout = adev->sdma_timeout;
-                       break;
-               default:
-                       timeout = adev->video_timeout;
-                       break;
-               }
+       if (ring->no_scheduler)
+               return 0;
 
-               r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
-                                  num_hw_submission, amdgpu_job_hang_limit,
-                                  timeout, NULL, ring->name);
-               if (r) {
-                       DRM_ERROR("Failed to create scheduler on ring %s.\n",
-                                 ring->name);
-                       return r;
-               }
+       switch (ring->funcs->type) {
+       case AMDGPU_RING_TYPE_GFX:
+               timeout = adev->gfx_timeout;
+               break;
+       case AMDGPU_RING_TYPE_COMPUTE:
+               timeout = adev->compute_timeout;
+               break;
+       case AMDGPU_RING_TYPE_SDMA:
+               timeout = adev->sdma_timeout;
+               break;
+       default:
+               timeout = adev->video_timeout;
+               break;
+       }
+
+       r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+                          num_hw_submission, amdgpu_job_hang_limit,
+                          timeout, sched_score, ring->name);
+       if (r) {
+               DRM_ERROR("Failed to create scheduler on ring %s.\n",
+                         ring->name);
+               return r;
        }
 
        return 0;
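The extra sched_score argument is forwarded into drm_sched_init(); NULL keeps per-scheduler scoring, while handing the same atomic_t to several rings lets the scheduler core account their load jointly (the guilty-job recheck above already decrements ring->sched.score when it completes a job by hand). A hypothetical shared-counter setup (the ring names are illustrative):

    static atomic_t enc_score = ATOMIC_INIT(0);

    /* two rings sharing one load counter for balancing */
    r = amdgpu_fence_driver_init_ring(ring0, num_hw_submission, &enc_score);
    if (!r)
            r = amdgpu_fence_driver_init_ring(ring1, num_hw_submission, &enc_score);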
index 5807cad833d370fb8838c6d07e34cf400e4ef34b..c5a9a4fb10d2bde0767b8f76d95d633e5f44709f 100644 (file)
@@ -126,6 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
                        AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                bp.type = ttm_bo_type_kernel;
                bp.resv = NULL;
+               bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
                r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
                if (r) {
                        return r;
index fb7171e5507cb957e794210f9caef897f65bfae0..311bcdc59eda67f3bf11b112bb371ef3fc08db7a 100644 (file)
@@ -58,6 +58,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             struct drm_gem_object **obj)
 {
        struct amdgpu_bo *bo;
+       struct amdgpu_bo_user *ubo;
        struct amdgpu_bo_param bp;
        int r;
 
@@ -71,10 +72,13 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
        bp.preferred_domain = initial_domain;
        bp.flags = flags;
        bp.domain = initial_domain;
-       r = amdgpu_bo_create(adev, &bp, &bo);
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+       r = amdgpu_bo_create_user(adev, &bp, &ubo);
        if (r)
                return r;
 
+       bo = &ubo->bo;
        *obj = &bo->tbo.base;
        (*obj)->funcs = &amdgpu_gem_object_funcs;
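bp.bo_ptr_size tells amdgpu_bo_create() how large an object to allocate, so wrapper structures embedding struct amdgpu_bo can come out of the same allocation path; amdgpu_bo_create_user() returns such a wrapper and the GEM code takes its embedded BO. The relationship in sketch form (the 'bo' member is confirmed by this hunk; everything else about the layout is an assumption):

    struct amdgpu_bo_user {
            struct amdgpu_bo        bo;     /* embedded base BO */
            /* user-BO-only state (e.g. metadata) would follow */
    };

    /* the plain-BO view is just the embedded member */
    bo = &ubo->bo;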
 
index 689addb1520d26bbac50f74ab560c848b9670174..95d4f43a03df465b701aab408cdbce729a146b72 100644 (file)
@@ -310,9 +310,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
        ring->eop_gpu_addr = kiq->eop_gpu_addr;
        ring->no_scheduler = true;
        sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+       r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
 
@@ -463,20 +462,25 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
 {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *kiq_ring = &kiq->ring;
-       int i;
+       int i, r;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
+       spin_lock(&adev->gfx.kiq.ring_lock);
        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
-                                       adev->gfx.num_compute_rings))
+                                       adev->gfx.num_compute_rings)) {
+               spin_unlock(&adev->gfx.kiq.ring_lock);
                return -ENOMEM;
+       }
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
                                           RESET_QUEUES, 0, 0);
+       r = amdgpu_ring_test_helper(kiq_ring);
+       spin_unlock(&adev->gfx.kiq.ring_lock);
 
-       return amdgpu_ring_test_helper(kiq_ring);
+       return r;
 }
 
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
@@ -519,12 +523,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 
        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
                                                        kiq_ring->queue);
-
+       spin_lock(&adev->gfx.kiq.ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
                                        kiq->pmf->set_resources_size);
        if (r) {
                DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+               spin_unlock(&adev->gfx.kiq.ring_lock);
                return r;
        }
 
@@ -533,6 +538,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
                kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
 
        r = amdgpu_ring_test_helper(kiq_ring);
+       spin_unlock(&adev->gfx.kiq.ring_lock);
        if (r)
                DRM_ERROR("KCQ enable failed\n");
 
@@ -671,8 +677,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
         */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
                kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-               if (adev->gfx.funcs->query_ras_error_count)
-                       adev->gfx.funcs->query_ras_error_count(adev, err_data);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_count)
+                       adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
                amdgpu_ras_reset_gpu(adev);
        }
        return AMDGPU_RAS_SUCCESS;
@@ -705,7 +712,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        BUG_ON(!ring->funcs->emit_rreg);
@@ -772,7 +779,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
        BUG_ON(!ring->funcs->emit_wreg);
 
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -836,14 +843,10 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
 
 void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
 {
-       if (is_support_sw_smu(adev)) {
-               smu_gfx_state_change_set(&adev->smu, state);
-       } else {
-               mutex_lock(&adev->pm.mutex);
-               if (adev->powerplay.pp_funcs &&
-                   adev->powerplay.pp_funcs->gfx_state_change_set)
-                       ((adev)->powerplay.pp_funcs->gfx_state_change_set(
-                               (adev)->powerplay.pp_handle, state));
-               mutex_unlock(&adev->pm.mutex);
-       }
+       mutex_lock(&adev->pm.mutex);
+       if (adev->powerplay.pp_funcs &&
+           adev->powerplay.pp_funcs->gfx_state_change_set)
+               adev->powerplay.pp_funcs->gfx_state_change_set(
+                       adev->powerplay.pp_handle, state);
+       mutex_unlock(&adev->pm.mutex);
 }
index 38af93f501e1e030c16defe2244847d265fb5680..d43fe2ed81168c57f77f7bc9560cd0f7d25b7438 100644 (file)
@@ -205,6 +205,19 @@ struct amdgpu_cu_info {
        uint32_t bitmap[4][4];
 };
 
+struct amdgpu_gfx_ras_funcs {
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+       int (*ras_error_inject)(struct amdgpu_device *adev,
+                               void *inject_if);
+       int (*query_ras_error_count)(struct amdgpu_device *adev,
+                                    void *ras_error_status);
+       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
+       void (*reset_ras_error_status)(struct amdgpu_device *adev);
+       void (*enable_watchdog_timer)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_gfx_funcs {
        /* get the gpu clock counter */
        uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
@@ -220,14 +233,8 @@ struct amdgpu_gfx_funcs {
                                uint32_t *dst);
        void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
                                 u32 queue, u32 vmid);
-       int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
-       int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
-       void (*reset_ras_error_count) (struct amdgpu_device *adev);
        void (*init_spm_golden)(struct amdgpu_device *adev);
-       void (*query_ras_error_status) (struct amdgpu_device *adev);
-       void (*reset_ras_error_status) (struct amdgpu_device *adev);
        void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
-       void (*enable_watchdog_timer)(struct amdgpu_device *adev);
 };
 
 struct sq_work {
@@ -330,7 +337,8 @@ struct amdgpu_gfx {
        DECLARE_BITMAP                  (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
        /*ras */
-       struct ras_common_if            *ras_if;
+       struct ras_common_if                    *ras_if;
+       const struct amdgpu_gfx_ras_funcs       *ras_funcs;
 };
 
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
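The same refactoring repeats for UMC, MMHUB, NBIO and XGMI in the other hunks: RAS callbacks move out of the generic funcs tables into dedicated ras_funcs tables, and every call site uses a two-level NULL guard so a block can ship with no RAS support (NULL table) or only a partial one. The convention at a call site:

    if (adev->gfx.ras_funcs &&
        adev->gfx.ras_funcs->query_ras_error_count)
            adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);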
index 6f7995293a1efca92903b3db3ef5904acaaa967e..4d32233cde9210df4a17e2adb5caeb10fcf46a13 100644 (file)
@@ -55,6 +55,8 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
                AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
        r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
        if (r)
                return r;
@@ -389,26 +391,46 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
-               r = adev->umc.funcs->ras_late_init(adev);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->ras_late_init) {
+               r = adev->umc.ras_funcs->ras_late_init(adev);
                if (r)
                        return r;
        }
 
-       if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
-               r = adev->mmhub.funcs->ras_late_init(adev);
+       if (adev->mmhub.ras_funcs &&
+           adev->mmhub.ras_funcs->ras_late_init) {
+               r = adev->mmhub.ras_funcs->ras_late_init(adev);
                if (r)
                        return r;
        }
 
-       return amdgpu_xgmi_ras_late_init(adev);
+       if (!adev->gmc.xgmi.connected_to_cpu)
+               adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
+
+       if (adev->gmc.xgmi.ras_funcs &&
+           adev->gmc.xgmi.ras_funcs->ras_late_init) {
+               r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
+
+       return 0;
 }
 
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 {
-       amdgpu_umc_ras_fini(adev);
-       amdgpu_mmhub_ras_fini(adev);
-       amdgpu_xgmi_ras_fini(adev);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->ras_fini)
+               adev->umc.ras_funcs->ras_fini(adev);
+
+       if (adev->mmhub.ras_funcs &&
+           adev->mmhub.ras_funcs->ras_fini)
+               adev->mmhub.ras_funcs->ras_fini(adev);
+
+       if (adev->gmc.xgmi.ras_funcs &&
+           adev->gmc.xgmi.ras_funcs->ras_fini)
+               adev->gmc.xgmi.ras_funcs->ras_fini(adev);
 }
 
        /*
@@ -514,6 +536,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA20:
+       case CHIP_ARCTURUS:
        case CHIP_ALDEBARAN:
                /*
                 * noretry = 0 will cause kfd page fault tests fail
index 7e248a4e2fa3c1c999284916df9a2043f4a6b090..cbb7735c698848a5337c313a27aaa90436b45f70 100644 (file)
@@ -135,6 +135,14 @@ struct amdgpu_gmc_funcs {
        unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
 };
 
+struct amdgpu_xgmi_ras_funcs {
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+       int (*query_ras_error_count)(struct amdgpu_device *adev,
+                                    void *ras_error_status);
+       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_xgmi {
        /* from psp */
        u64 node_id;
@@ -151,6 +159,7 @@ struct amdgpu_xgmi {
        struct ras_common_if *ras_if;
        bool connected_to_cpu;
        bool pending_reset;
+       const struct amdgpu_xgmi_ras_funcs *ras_funcs;
 };
 
 struct amdgpu_gmc {
index 8980329cded0298d42529ac0520a5f9442c97f96..540c01052b219a73e1ff70f067396eb3772cfe9b 100644 (file)
@@ -49,8 +49,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       man->size * PAGE_SIZE);
+       return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
 }
 
 /**
@@ -68,8 +67,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       amdgpu_gtt_mgr_usage(man));
+       return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
 }
 
 static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
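sysfs_emit() replaces snprintf(buf, PAGE_SIZE, ...) in show() callbacks; it knows the buffer is a full sysfs page, enforces the bound itself, and warns on misuse. Minimal shape of a converted attribute (the value being printed is a placeholder):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* no explicit PAGE_SIZE bookkeeping needed */
            return sysfs_emit(buf, "%llu\n", some_u64_value);
    }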
index af026109421a35667a1ee15af5eca43212d4ea38..90f50561b43a9d707f1782bf79b6315bede7095a 100644 (file)
@@ -199,13 +199,13 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
         * ack the interrupt if it is there
         */
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
-               if (adev->nbio.funcs &&
-                   adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
-                       adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
+                       adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
 
-               if (adev->nbio.funcs &&
-                   adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
-                       adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
+                       adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
        }
 
        return ret;
@@ -382,11 +382,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 
                        kfree(src->enabled_types);
                        src->enabled_types = NULL;
-                       if (src->data) {
-                               kfree(src->data);
-                               kfree(src);
-                               adev->irq.client[i].sources[j] = NULL;
-                       }
                }
                kfree(adev->irq.client[i].sources);
                adev->irq.client[i].sources = NULL;
index ac527e5deae6765934724e8cb351b56ace483578..cf6116648322aa1411d8cc1d611cbabed9861d3b 100644 (file)
@@ -62,7 +62,6 @@ struct amdgpu_irq_src {
        unsigned                                num_types;
        atomic_t                                *enabled_types;
        const struct amdgpu_irq_src_funcs       *funcs;
-       void *data;
 };
 
 struct amdgpu_irq_client {
index ada807de978b3fbe060c0ec604c3408bf370bca5..39ee88d29cca1f3d0d06dfd1c487172820ec4362 100644 (file)
@@ -159,7 +159,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
                goto out;
        }
 
-       if (amdgpu_device_supports_atpx(dev) &&
+       if (amdgpu_device_supports_px(dev) &&
            (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
                adev->runpm = true;
                dev_info(adev->dev, "Using ATPX for runtime pm\n");
@@ -200,9 +200,13 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 
        if (adev->runpm) {
                /* only need to skip on ATPX */
-               if (amdgpu_device_supports_atpx(dev) &&
-                   !amdgpu_is_atpx_hybrid())
+               if (amdgpu_device_supports_px(dev))
                        dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+               /* we want direct complete for BOCO */
+               if (amdgpu_device_supports_boco(dev))
+                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
+                                               DPM_FLAG_SMART_SUSPEND |
+                                               DPM_FLAG_MAY_SKIP_RESUME);
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_allow(dev->dev);
@@ -785,9 +789,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
                        dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
-               dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+               dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-               dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+               dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->cu_active_number = adev->gfx.cu_info.number;
                dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info->ce_ram_size = adev->gfx.ce_ram_size;
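
The gart_page_size change matters on kernels whose CPU page exceeds the 4 KiB GPU page (64 KiB pages on arm64/POWER, for instance); reporting only AMDGPU_GPU_PAGE_SIZE let userspace pick GART offsets the CPU could not map. A hedged illustration of the values involved:

    /* AMDGPU_GPU_PAGE_SIZE is fixed at 4096 */
    u32 align = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
    /* 4 KiB CPU pages:  max(4096, 4096)  == 4096  (no change)
     * 64 KiB CPU pages: max(65536, 4096) == 65536 (now CPU-mappable) */
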
index 1ae9bdae7311412a920cc01c033c61ff9b52c1ff..11aa29933c1f8c968101bb8b0b09a2f8f2251f68 100644 (file)
 #ifndef __AMDGPU_MMHUB_H__
 #define __AMDGPU_MMHUB_H__
 
-struct amdgpu_mmhub_funcs {
-       void (*ras_init)(struct amdgpu_device *adev);
+struct amdgpu_mmhub_ras_funcs {
        int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
        void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
+                                     void *ras_error_status);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
        void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mmhub_funcs {
        u64 (*get_fb_location)(struct amdgpu_device *adev);
        void (*init)(struct amdgpu_device *adev);
        int (*gart_enable)(struct amdgpu_device *adev);
@@ -40,12 +44,12 @@ struct amdgpu_mmhub_funcs {
                                uint64_t page_table_base);
        void (*update_power_gating)(struct amdgpu_device *adev,
                                 bool enable);
-       void (*query_ras_error_status)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_mmhub {
        struct ras_common_if *ras_if;
        const struct amdgpu_mmhub_funcs *funcs;
+       const struct amdgpu_mmhub_ras_funcs *ras_funcs;
 };
 
 int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
index 7c11bce4514bd0c7f47d255f151603848a7f2832..25ee53545837dce50ab69642128a72d66bef1dcc 100644 (file)
@@ -47,6 +47,17 @@ struct nbio_hdp_flush_reg {
        u32 ref_and_mask_sdma7;
 };
 
+struct amdgpu_nbio_ras_funcs {
+       void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+       void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+       int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+       int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+       void (*query_ras_error_count)(struct amdgpu_device *adev,
+                                     void *ras_error_status);
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_nbio_funcs {
        const struct nbio_hdp_flush_reg *hdp_flush_reg;
        u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
@@ -79,13 +90,6 @@ struct amdgpu_nbio_funcs {
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
        void (*remap_hdp_registers)(struct amdgpu_device *adev);
-       void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
-       void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
-       int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
-       int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
-       int (*ras_late_init)(struct amdgpu_device *adev);
        void (*enable_aspm)(struct amdgpu_device *adev,
                            bool enable);
        void (*program_aspm)(struct amdgpu_device *adev);
@@ -97,6 +101,7 @@ struct amdgpu_nbio {
        struct amdgpu_irq_src ras_err_event_athub_irq;
        struct ras_common_if *ras_if;
        const struct amdgpu_nbio_funcs *funcs;
+       const struct amdgpu_nbio_ras_funcs *ras_funcs;
 };
 
 int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
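
As with mmhub and xgmi, the nbio RAS entry points split into their own table so RAS-less ASICs simply leave the pointer NULL. A sketch of the hookup, modeled on the nbio_v7_4 assignment this series adds in amdgpu_ras_init() (see the ras.c hunk further down):

    /* base nbio ops and RAS ops are now attached independently */
    adev->nbio.funcs = &nbio_v7_4_funcs;
    adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
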
index ac1bb5089260483c37b4488f6799a84c8d61c99e..1345f7eba011bf49414b4d46f8123a1b36ec7126 100644 (file)
@@ -77,6 +77,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+       struct amdgpu_bo_user *ubo;
 
        if (bo->tbo.pin_count > 0)
                amdgpu_bo_subtract_pin_size(bo);
@@ -94,7 +95,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
        }
        amdgpu_bo_unref(&bo->parent);
 
-       kfree(bo->metadata);
+       if (bo->tbo.type == ttm_bo_type_device) {
+               ubo = to_amdgpu_bo_user(bo);
+               kfree(ubo->metadata);
+       }
+
        kfree(bo);
 }
 
@@ -248,6 +253,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
        bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, &bp, bo_ptr);
@@ -543,9 +549,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        if (!amdgpu_bo_validate_size(adev, size, bp->domain))
                return -ENOMEM;
 
-       *bo_ptr = NULL;
+       BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
-       bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+       *bo_ptr = NULL;
+       bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -635,6 +642,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                AMDGPU_GEM_CREATE_SHADOW;
        bp.type = ttm_bo_type_kernel;
        bp.resv = bo->tbo.base.resv;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
        if (!r) {
@@ -669,6 +677,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        int r;
 
        bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+
        r = amdgpu_bo_do_create(adev, bp, bo_ptr);
        if (r)
                return r;
@@ -690,6 +699,34 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @ubo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by a user application.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+                         struct amdgpu_bo_param *bp,
+                         struct amdgpu_bo_user **ubo_ptr)
+{
+       struct amdgpu_bo *bo_ptr;
+       int r;
+
+       bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+       bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+       r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
+       if (r)
+               return r;
+
+       *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
+       return r;
+}
+
 /**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
@@ -1024,13 +1061,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        struct ttm_resource_manager *man;
 
-       /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
-       if (adev->flags & AMD_IS_APU) {
-               /* Useless to evict on IGP chips */
+       if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+               /* No need to evict vram on APUs for suspend to ram */
                return 0;
        }
-#endif
 
        man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
@@ -1095,25 +1129,6 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
        }
 }
 
-/**
- * amdgpu_bo_fbdev_mmap - mmap fbdev memory
- * @bo: &amdgpu_bo buffer object
- * @vma: vma as input from the fbdev mmap method
- *
- * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
-                            struct vm_area_struct *vma)
-{
-       if (vma->vm_pgoff != 0)
-               return -EACCES;
-
-       return ttm_bo_mmap_obj(vma, &bo->tbo);
-}
-
 /**
  * amdgpu_bo_set_tiling_flags - set tiling flags
  * @bo: &amdgpu_bo buffer object
@@ -1128,12 +1143,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_bo_user *ubo;
 
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;
 
-       bo->tiling_flags = tiling_flags;
+       ubo = to_amdgpu_bo_user(bo);
+       ubo->tiling_flags = tiling_flags;
        return 0;
 }
 
@@ -1147,10 +1165,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
+       struct amdgpu_bo_user *ubo;
+
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        dma_resv_assert_held(bo->tbo.base.resv);
+       ubo = to_amdgpu_bo_user(bo);
 
        if (tiling_flags)
-               *tiling_flags = bo->tiling_flags;
+               *tiling_flags = ubo->tiling_flags;
 }
 
 /**
@@ -1169,13 +1191,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
                            uint32_t metadata_size, uint64_t flags)
 {
+       struct amdgpu_bo_user *ubo;
        void *buffer;
 
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+       ubo = to_amdgpu_bo_user(bo);
        if (!metadata_size) {
-               if (bo->metadata_size) {
-                       kfree(bo->metadata);
-                       bo->metadata = NULL;
-                       bo->metadata_size = 0;
+               if (ubo->metadata_size) {
+                       kfree(ubo->metadata);
+                       ubo->metadata = NULL;
+                       ubo->metadata_size = 0;
                }
                return 0;
        }
@@ -1187,10 +1212,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        if (buffer == NULL)
                return -ENOMEM;
 
-       kfree(bo->metadata);
-       bo->metadata_flags = flags;
-       bo->metadata = buffer;
-       bo->metadata_size = metadata_size;
+       kfree(ubo->metadata);
+       ubo->metadata_flags = flags;
+       ubo->metadata = buffer;
+       ubo->metadata_size = metadata_size;
 
        return 0;
 }
@@ -1214,21 +1239,25 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
 {
+       struct amdgpu_bo_user *ubo;
+
        if (!buffer && !metadata_size)
                return -EINVAL;
 
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+       ubo = to_amdgpu_bo_user(bo);
        if (buffer) {
-               if (buffer_size < bo->metadata_size)
+               if (buffer_size < ubo->metadata_size)
                        return -EINVAL;
 
-               if (bo->metadata_size)
-                       memcpy(buffer, bo->metadata, bo->metadata_size);
+               if (ubo->metadata_size)
+                       memcpy(buffer, ubo->metadata, ubo->metadata_size);
        }
 
        if (metadata_size)
-               *metadata_size = bo->metadata_size;
+               *metadata_size = ubo->metadata_size;
        if (flags)
-               *flags = bo->metadata_flags;
+               *flags = ubo->metadata_flags;
 
        return 0;
 }
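
With tiling_flags and metadata moved into struct amdgpu_bo_user, user-visible BOs must come from the new helper so the allocation is sized correctly. A hedged usage sketch; adev and size are assumed to be in scope, and the placement values are illustrative:

    struct amdgpu_bo_param bp;
    struct amdgpu_bo_user *ubo;
    int r;

    memset(&bp, 0, sizeof(bp));
    bp.size = size;
    bp.byte_align = PAGE_SIZE;
    bp.domain = AMDGPU_GEM_DOMAIN_GTT;
    bp.type = ttm_bo_type_device;
    bp.resv = NULL;
    /* bo_ptr_size is filled in by amdgpu_bo_create_user() itself */
    r = amdgpu_bo_create_user(adev, &bp, &ubo);
    if (r)
        return r;
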
index 54ceb065e5463d72901cebb6bbc1b49785e0c9e5..2d1fefbe1e99ff65216fd0eca658dbb431cea756 100644 (file)
 #define AMDGPU_BO_INVALID_OFFSET       LONG_MAX
 #define AMDGPU_BO_MAX_PLACEMENTS       3
 
+#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+
 struct amdgpu_bo_param {
        unsigned long                   size;
        int                             byte_align;
+       u32                             bo_ptr_size;
        u32                             domain;
        u32                             preferred_domain;
        u64                             flags;
@@ -89,10 +92,6 @@ struct amdgpu_bo {
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
        u64                             flags;
-       u64                             tiling_flags;
-       u64                             metadata_flags;
-       void                            *metadata;
-       u32                             metadata_size;
        unsigned                        prime_shared_count;
        /* per VM structure for page tables and with virtual addresses */
        struct amdgpu_vm_bo_base        *vm_bo;
@@ -111,6 +110,15 @@ struct amdgpu_bo {
        struct kgd_mem                  *kfd_bo;
 };
 
+struct amdgpu_bo_user {
+       struct amdgpu_bo                bo;
+       u64                             tiling_flags;
+       u64                             metadata_flags;
+       void                            *metadata;
+       u32                             metadata_size;
+};
+
 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
 {
        return container_of(tbo, struct amdgpu_bo, tbo);
@@ -254,6 +262,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
                               uint64_t offset, uint64_t size, uint32_t domain,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr);
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+                         struct amdgpu_bo_param *bp,
+                         struct amdgpu_bo_user **ubo_ptr);
 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
@@ -268,8 +279,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 void amdgpu_bo_fini(struct amdgpu_device *adev);
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
-                               struct vm_area_struct *vma);
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
index bae304b0d67a17a00ccec21965b603f75475d3ee..9e769cf6095be2862e2011bc52704d82fe4a74e9 100644 (file)
@@ -556,6 +556,24 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
        return ret;
 }
 
+static int psp_boot_config_set(struct amdgpu_device *adev)
+{
+       struct psp_context *psp = &adev->psp;
+       struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+       if (adev->asic_type != CHIP_SIENNA_CICHLID)
+               return 0;
+
+       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+       cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+       cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
+       cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
+       cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
+
+       return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+}
+
 static int psp_rl_load(struct amdgpu_device *adev)
 {
        struct psp_context *psp = &adev->psp;
@@ -1912,6 +1930,11 @@ static int psp_hw_start(struct psp_context *psp)
                return ret;
        }
 
+       ret = psp_boot_config_set(adev);
+       if (ret)
+               DRM_WARN("PSP set boot config failed\n");
+
        ret = psp_tmr_init(psp);
        if (ret) {
                DRM_ERROR("PSP tmr init failed!\n");
@@ -2146,9 +2169,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
        if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
                return 0;
 
-
-       if (amdgpu_in_reset(adev) && ras && ras->supported &&
-               adev->asic_type == CHIP_ARCTURUS) {
+       if ((amdgpu_in_reset(adev) &&
+            ras && ras->supported &&
+            (adev->asic_type == CHIP_ARCTURUS ||
+             adev->asic_type == CHIP_VEGA20)) ||
+            (adev->in_runpm &&
+             adev->asic_type >= CHIP_NAVI10 &&
+             adev->asic_type <= CHIP_NAVI12)) {
                ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
                if (ret) {
                        DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2201,6 +2228,22 @@ static bool fw_load_skip_check(struct psp_context *psp,
        return false;
 }
 
+int psp_load_fw_list(struct psp_context *psp,
+                    struct amdgpu_firmware_info **ucode_list, int ucode_count)
+{
+       int ret = 0, i;
+       struct amdgpu_firmware_info *ucode;
+
+       for (i = 0; i < ucode_count; ++i) {
+               ucode = ucode_list[i];
+               psp_print_fw_hdr(psp, ucode);
+               ret = psp_execute_np_fw_load(psp, ucode);
+               if (ret)
+                       return ret;
+       }
+       return ret;
+}
+
 static int psp_np_fw_load(struct psp_context *psp)
 {
        int i, ret;
@@ -2967,7 +3010,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
                return ret;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+       return sysfs_emit(buf, "%x\n", fw_ver);
 }
 
 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
index 64f1433908355aed26cea81b3a8905236aba6789..46a5328e00e0bbcf949527f593a297da2d88bd73 100644 (file)
@@ -420,4 +420,7 @@ int psp_init_ta_microcode(struct psp_context *psp,
                          const char *chip_name);
 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
                                        uint64_t *output_ptr);
+
+int psp_load_fw_list(struct psp_context *psp,
+                    struct amdgpu_firmware_info **ucode_list, int ucode_count);
 #endif
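
Exporting psp_load_fw_list() lets reset code re-push a specific subset of firmwares without rerunning the whole psp_np_fw_load() sequence. A hedged sketch; the choice of SDMA ucode IDs is illustrative:

    struct amdgpu_firmware_info *ucode_list[2];

    ucode_list[0] = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0];
    ucode_list[1] = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA1];
    if (psp_load_fw_list(&adev->psp, ucode_list, 2))
        dev_err(adev->dev, "re-loading SDMA firmware failed\n");
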
index 0e16683876aa482128ebd6f9333872debb06789a..0541196ae1ed80c9358e01fd4b74cddf187db207 100644 (file)
@@ -99,6 +99,49 @@ static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
        return false;
 }
 
+static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
+{
+       struct ras_err_data err_data = {0, 0, 0, NULL};
+       struct eeprom_table_record err_rec;
+
+       if ((address >= adev->gmc.mc_vram_size) ||
+           (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+               dev_warn(adev->dev,
+                        "RAS WARN: input address 0x%llx is invalid.\n",
+                        address);
+               return -EINVAL;
+       }
+
+       if (amdgpu_ras_check_bad_page(adev, address)) {
+               dev_warn(adev->dev,
+                        "RAS WARN: 0x%llx has been marked as a bad page!\n",
+                        address);
+               return 0;
+       }
+
+       memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+       err_rec.address = address;
+       err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
+       err_rec.ts = (uint64_t)ktime_get_real_seconds();
+       err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+       err_data.err_addr = &err_rec;
+       err_data.err_addr_cnt = 1;
+
+       if (amdgpu_bad_page_threshold != 0) {
+               amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+                                        err_data.err_addr_cnt);
+               amdgpu_ras_save_bad_pages(adev);
+       }
+
+       dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
+       dev_warn(adev->dev, "Clear EEPROM:\n");
+       dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
+
+       return 0;
+}
+
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
 {
@@ -178,11 +221,25 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
+       else if (strstr(str, "retire_page") != NULL)
+               op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;
 
        if (op != -1) {
+               if (op == 3) {
+                       if (sscanf(str, "%*s %llu", &address) != 1)
+                               if (sscanf(str, "%*s 0x%llx", &address) != 1)
+                                       return -EINVAL;
+
+                       data->op = op;
+                       data->inject.address = address;
+
+                       return 0;
+               }
+
                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;
 
@@ -310,6 +367,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
        if (ret)
                return -EINVAL;
 
+       if (data.op == 3) {
+               ret = amdgpu_reserve_page_direct(adev, data.inject.address);
+               if (!ret)
+                       return size;
+               else
+                       return ret;
+       }
+
        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;
 
@@ -431,15 +498,13 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
        };
 
        if (!amdgpu_ras_get_error_query_ready(obj->adev))
-               return snprintf(buf, PAGE_SIZE,
-                               "Query currently inaccessible\n");
+               return sysfs_emit(buf, "Query currently inaccessible\n");
 
        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
-                       "ue", info.ue_count,
-                       "ce", info.ce_count);
+       return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+                         "ce", info.ce_count);
 }
 
 /* obj begin */
@@ -449,11 +514,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 
 static inline void put_obj(struct ras_manager *obj)
 {
-       if (obj && --obj->use == 0)
+       if (obj && (--obj->use == 0))
                list_del(&obj->node);
-       if (obj && obj->use < 0) {
-                DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
-       }
+       if (obj && (obj->use < 0))
+               DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
 }
 
 /* make one obj and return it. */
@@ -777,13 +841,15 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 
        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__UMC:
-               if (adev->umc.funcs->query_ras_error_count)
-                       adev->umc.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->umc.ras_funcs &&
+                   adev->umc.ras_funcs->query_ras_error_count)
+                       adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
-               if (adev->umc.funcs->query_ras_error_address)
-                       adev->umc.funcs->query_ras_error_address(adev, &err_data);
+               if (adev->umc.ras_funcs &&
+                   adev->umc.ras_funcs->query_ras_error_address)
+                       adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                if (adev->sdma.funcs->query_ras_error_count) {
@@ -793,25 +859,32 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                }
                break;
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->query_ras_error_count)
-                       adev->gfx.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_count)
+                       adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
 
-               if (adev->gfx.funcs->query_ras_error_status)
-                       adev->gfx.funcs->query_ras_error_status(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_status)
+                       adev->gfx.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.funcs->query_ras_error_count)
-                       adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->query_ras_error_count)
+                       adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
 
-               if (adev->mmhub.funcs->query_ras_error_status)
-                       adev->mmhub.funcs->query_ras_error_status(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->query_ras_error_status)
+                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
-               if (adev->nbio.funcs->query_ras_error_count)
-                       adev->nbio.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->query_ras_error_count)
+                       adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-               amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+               if (adev->gmc.xgmi.ras_funcs &&
+                   adev->gmc.xgmi.ras_funcs->query_ras_error_count)
+                       adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        default:
                break;
@@ -848,15 +921,18 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
 
        switch (block) {
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->reset_ras_error_count)
-                       adev->gfx.funcs->reset_ras_error_count(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->reset_ras_error_count)
+                       adev->gfx.ras_funcs->reset_ras_error_count(adev);
 
-               if (adev->gfx.funcs->reset_ras_error_status)
-                       adev->gfx.funcs->reset_ras_error_status(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->reset_ras_error_status)
+                       adev->gfx.ras_funcs->reset_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.funcs->reset_ras_error_count)
-                       adev->mmhub.funcs->reset_ras_error_count(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->reset_ras_error_count)
+                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                if (adev->sdma.funcs->reset_ras_error_count)
@@ -921,12 +997,14 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 
        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->ras_error_inject)
-                       ret = adev->gfx.funcs->ras_error_inject(adev, info);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->ras_error_inject)
+                       ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
                else
                        ret = -EINVAL;
                break;
        case AMDGPU_RAS_BLOCK__UMC:
+       case AMDGPU_RAS_BLOCK__SDMA:
        case AMDGPU_RAS_BLOCK__MMHUB:
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
                ret = psp_ras_trigger_error(&adev->psp, &block_info);
@@ -1508,12 +1586,14 @@ static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
         */
        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->query_ras_error_status)
-                       adev->gfx.funcs->query_ras_error_status(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_status)
+                       adev->gfx.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.funcs->query_ras_error_status)
-                       adev->mmhub.funcs->query_ras_error_status(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->query_ras_error_status)
+                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
                break;
        default:
                break;
@@ -1933,15 +2013,13 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
        return 0;
 }
 
-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
 {
-       if (adev->asic_type != CHIP_VEGA10 &&
-               adev->asic_type != CHIP_VEGA20 &&
-               adev->asic_type != CHIP_ARCTURUS &&
-               adev->asic_type != CHIP_SIENNA_CICHLID)
-               return 1;
-       else
-               return 0;
+       return adev->asic_type == CHIP_VEGA10 ||
+               adev->asic_type == CHIP_VEGA20 ||
+               adev->asic_type == CHIP_ARCTURUS ||
+               adev->asic_type == CHIP_ALDEBARAN ||
+               adev->asic_type == CHIP_SIENNA_CICHLID;
 }
 
 /*
@@ -1960,22 +2038,32 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
        *supported = 0;
 
        if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
-               amdgpu_ras_check_asic_type(adev))
+           !amdgpu_ras_asic_supported(adev))
                return;
 
-       if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
-               dev_info(adev->dev, "MEM ECC is active.\n");
-               *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
-                               1 << AMDGPU_RAS_BLOCK__DF);
-       } else
-               dev_info(adev->dev, "MEM ECC is not presented.\n");
+       if (!adev->gmc.xgmi.connected_to_cpu) {
+               if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+                       dev_info(adev->dev, "MEM ECC is active.\n");
+                       *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+                                       1 << AMDGPU_RAS_BLOCK__DF);
+               } else {
+                       dev_info(adev->dev, "MEM ECC is not present.\n");
+               }
 
-       if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
-               dev_info(adev->dev, "SRAM ECC is active.\n");
-               *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
-                               1 << AMDGPU_RAS_BLOCK__DF);
-       } else
-               dev_info(adev->dev, "SRAM ECC is not presented.\n");
+               if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+                       dev_info(adev->dev, "SRAM ECC is active.\n");
+                       *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+                                       1 << AMDGPU_RAS_BLOCK__DF);
+               } else {
+                       dev_info(adev->dev, "SRAM ECC is not present.\n");
+               }
+       } else {
+               /* the driver only manages RAS features for a few IP blocks
+                * when the GPU is connected to the CPU through XGMI */
+               *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
+                               1 << AMDGPU_RAS_BLOCK__SDMA |
+                               1 << AMDGPU_RAS_BLOCK__MMHUB);
+       }
 
        /* hw_supported needs to be aligned with RAS block mask. */
        *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -2024,14 +2112,31 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        /* Might need get this flag from vbios. */
        con->flags = RAS_DEFAULT_FLAGS;
 
-       if (adev->nbio.funcs->init_ras_controller_interrupt) {
-               r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+       /* initialize the nbio ras functions ahead of any other
+        * ras functions so the hardware fatal error interrupt
+        * can be enabled as early as possible */
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+       case CHIP_ARCTURUS:
+       case CHIP_ALDEBARAN:
+               if (!adev->gmc.xgmi.connected_to_cpu)
+                       adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+               break;
+       default:
+               /* nbio ras is not available */
+               break;
+       }
+
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->init_ras_controller_interrupt) {
+               r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
                if (r)
                        goto release_con;
        }
 
-       if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
-               r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
+               r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
                if (r)
                        goto release_con;
        }
@@ -2052,6 +2157,32 @@ release_con:
        return r;
 }
 
+static bool amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+{
+       return adev->gmc.xgmi.connected_to_cpu;
+}
+
+static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
+                                       struct ras_common_if *ras_block)
+{
+       struct ras_query_if info = {
+               .head = *ras_block,
+       };
+
+       if (!amdgpu_persistent_edc_harvesting_supported(adev))
+               return 0;
+
+       if (amdgpu_ras_query_error_status(adev, &info) != 0)
+               DRM_WARN("RAS init harvest failure\n");
+
+       if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
+               DRM_WARN("RAS init harvest reset failure\n");
+
+       return 0;
+}
+
 /* helper function to handle common stuff in ip late init phase */
 int amdgpu_ras_late_init(struct amdgpu_device *adev,
                         struct ras_common_if *ras_block,
@@ -2081,6 +2212,9 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
                        return r;
        }
 
+       /* check for errors on warm reset on EDC-persistent supported ASICs */
+       amdgpu_persistent_edc_harvesting(adev, ras_block);
+
        /* in resume phase, no need to create ras fs node */
        if (adev->in_suspend || amdgpu_in_reset(adev))
                return 0;
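
For reference, a hedged usage sketch of the new retire_page debugfs command; the dri instance number is illustrative:

    /* reserve a page directly, bypassing error injection:
     *   echo "retire_page 0x1000" > /sys/kernel/debug/dri/0/ras/ras_ctrl
     * the parser accepts decimal or 0x-prefixed hex addresses */
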
index a05dbbbd9803b80ba909905052a633046f1b6522..f40c871da0c623d584953d23b292b5772f91432f 100644 (file)
@@ -31,6 +31,7 @@
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342   0xA0
 #define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID   0xA0
+#define EEPROM_I2C_TARGET_ADDR_ALDEBARAN        0xA0
 
 /*
 * The 2 macros below represent the actual size in bytes that
@@ -64,7 +65,8 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
 {
        if ((adev->asic_type == CHIP_VEGA20) ||
            (adev->asic_type == CHIP_ARCTURUS) ||
-           (adev->asic_type == CHIP_SIENNA_CICHLID))
+           (adev->asic_type == CHIP_SIENNA_CICHLID) ||
+           (adev->asic_type == CHIP_ALDEBARAN))
                return true;
 
        return false;
@@ -106,6 +108,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
                *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
                break;
 
+       case CHIP_ALDEBARAN:
+               *i2c_addr = EEPROM_I2C_TARGET_ADDR_ALDEBARAN;
+               break;
+
        default:
                return false;
        }
index b49a61d07d605e307a84812fa1c37d4755dcce9e..40f2adf305bc26a3bb7945ee6b68d844b6ff9fc6 100644 (file)
@@ -64,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
        BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
        node = res->mm_node;
-       while (start > node->size << PAGE_SHIFT)
+       while (start >= node->size << PAGE_SHIFT)
                start -= node++->size << PAGE_SHIFT;
 
        cur->start = (node->start << PAGE_SHIFT) + start;
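
The comparison fix bites exactly at node boundaries. A hedged walk-through with two 2-page nodes and start equal to 2 pages:

    /* old: 'start > size' is false for node[0], so the cursor stays in
     *      node[0] with an offset one full page past its end
     * new: 'start >= size' subtracts node[0]'s size and lands in
     *      node[1] at offset 0, as intended */
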
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
new file mode 100644 (file)
index 0000000..02afd41
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_reset.h"
+#include "aldebaran.h"
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+                            struct amdgpu_reset_handler *handler)
+{
+       /* TODO: Check if handler exists? */
+       list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
+       return 0;
+}
+
+int amdgpu_reset_init(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_ALDEBARAN:
+               ret = aldebaran_reset_init(adev);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+int amdgpu_reset_fini(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_ALDEBARAN:
+               ret = aldebaran_reset_fini(adev);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+                                  struct amdgpu_reset_context *reset_context)
+{
+       struct amdgpu_reset_handler *reset_handler = NULL;
+
+       if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
+               reset_handler = adev->reset_cntl->get_reset_handler(
+                       adev->reset_cntl, reset_context);
+       if (!reset_handler)
+               return -ENOSYS;
+
+       return reset_handler->prepare_hwcontext(adev->reset_cntl,
+                                               reset_context);
+}
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+                              struct amdgpu_reset_context *reset_context)
+{
+       int ret;
+       struct amdgpu_reset_handler *reset_handler = NULL;
+
+       if (adev->reset_cntl)
+               reset_handler = adev->reset_cntl->get_reset_handler(
+                       adev->reset_cntl, reset_context);
+       if (!reset_handler)
+               return -ENOSYS;
+
+       ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
+       if (ret)
+               return ret;
+
+       return reset_handler->restore_hwcontext(adev->reset_cntl,
+                                               reset_context);
+}
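
ASIC backends register their handlers on the reset control at init time; the aldebaran.c file added by this series does that for mode-2 reset. A hedged sketch with placeholder my_* names (their signatures follow the ops in amdgpu_reset.h below):

    static struct amdgpu_reset_handler my_mode2_handler = {
        .reset_method = AMD_RESET_METHOD_MODE2,
        .prepare_hwcontext = my_prepare_hwcontext,
        .perform_reset = my_perform_reset,
        .restore_hwcontext = my_restore_hwcontext,
    };

    amdgpu_reset_add_handler(adev->reset_cntl, &my_mode2_handler);
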
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
new file mode 100644 (file)
index 0000000..e00d38d
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RESET_H__
+#define __AMDGPU_RESET_H__
+
+#include "amdgpu.h"
+
+enum AMDGPU_RESET_FLAGS {
+       AMDGPU_NEED_FULL_RESET = 0,
+       AMDGPU_SKIP_HW_RESET = 1,
+};
+
+struct amdgpu_reset_context {
+       enum amd_reset_method method;
+       struct amdgpu_device *reset_req_dev;
+       struct amdgpu_job *job;
+       struct amdgpu_hive_info *hive;
+       unsigned long flags;
+};
+
+struct amdgpu_reset_handler {
+       enum amd_reset_method reset_method;
+       struct list_head handler_list;
+       int (*prepare_env)(struct amdgpu_reset_control *reset_ctl,
+                          struct amdgpu_reset_context *context);
+       int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+                                struct amdgpu_reset_context *context);
+       int (*perform_reset)(struct amdgpu_reset_control *reset_ctl,
+                            struct amdgpu_reset_context *context);
+       int (*restore_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+                                struct amdgpu_reset_context *context);
+       int (*restore_env)(struct amdgpu_reset_control *reset_ctl,
+                          struct amdgpu_reset_context *context);
+
+       int (*do_reset)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_reset_control {
+       void *handle;
+       struct work_struct reset_work;
+       struct mutex reset_lock;
+       struct list_head reset_handlers;
+       atomic_t in_reset;
+       enum amd_reset_method active_reset;
+       struct amdgpu_reset_handler *(*get_reset_handler)(
+               struct amdgpu_reset_control *reset_ctl,
+               struct amdgpu_reset_context *context);
+       void (*async_reset)(struct work_struct *work);
+};
+
+int amdgpu_reset_init(struct amdgpu_device *adev);
+int amdgpu_reset_fini(struct amdgpu_device *adev);
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+                                  struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+                              struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+                            struct amdgpu_reset_handler *handler);
+
+#endif
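
The generic recovery path is expected to drive these entry points roughly as below; -ENOSYS signals that no registered handler claimed the reset, so the caller falls back to the legacy suspend/reset/resume sequence. A hedged sketch:

    struct amdgpu_reset_context reset_context;
    int r;

    reset_context.method = AMD_RESET_METHOD_NONE;
    reset_context.reset_req_dev = adev;
    reset_context.job = NULL;
    reset_context.hive = NULL;
    reset_context.flags = 0;
    set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

    r = amdgpu_reset_prepare_hwcontext(adev, &reset_context);
    if (r != -ENOSYS)
        return r;   /* an ASIC-specific handler took over (or failed) */
    /* otherwise: legacy reset path */
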
index b644c78475fd8de1e53677bccf3333d4db104fa0..688624ebe42110c314a4f4e551cf96509919d6f7 100644 (file)
@@ -164,7 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
-                    unsigned int irq_type, unsigned int hw_prio)
+                    unsigned int irq_type, unsigned int hw_prio,
+                    atomic_t *sched_score)
 {
        int r;
        int sched_hw_submission = amdgpu_sched_hw_submission;
@@ -189,7 +190,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                ring->adev = adev;
                ring->idx = adev->num_rings++;
                adev->rings[ring->idx] = ring;
-               r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
+               r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
+                                                 sched_score);
                if (r)
                        return r;
        }
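
Rings that don't need balancing pass NULL for the new argument; VCN passes a per-instance score so drm_sched can spread jobs across asymmetric encode/decode engines (see the sched_score field added to amdgpu_vcn_inst below). A hedged sketch of the VCN-style call; the ring-size, irq, and priority parameters are illustrative:

    r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
                         AMDGPU_RING_PRIO_DEFAULT,
                         &adev->vcn.inst[i].sched_score);
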
index 56acec1075acd7defabce37d0bf6862af174d7f6..ca16228352969e804d8e8498018630ab2fe14bdc 100644 (file)
@@ -111,7 +111,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-                                 unsigned num_hw_submission);
+                                 unsigned num_hw_submission,
+                                 atomic_t *sched_score);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
@@ -282,7 +283,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned int ring_size, struct amdgpu_irq_src *irq_src,
-                    unsigned int irq_type, unsigned int prio);
+                    unsigned int irq_type, unsigned int prio,
+                    atomic_t *sched_score);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t val0,
index aeaaae713c59df1cc029605ebb26e3e2816bcaa9..4fc2ce8ce8ab597ff557c33528d827d78fad964b 100644 (file)
@@ -127,7 +127,8 @@ struct amdgpu_rlc_funcs {
        void (*reset)(struct amdgpu_device *adev);
        void (*start)(struct amdgpu_device *adev);
        void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
-       void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+       void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
+       u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
        bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
 
index 9cf856c94f944ef1865774b2e8b3f39edca5d3f8..5369c8dd076400ca28a6aaa3727fbb1f121ee2f2 100644 (file)
@@ -95,9 +95,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
        struct drm_device *dev = adev_to_drm(adev);
        uint32_t phy_id;
        uint32_t op;
-       int i;
        char str[64];
-       char i2c_output[256];
        int ret;
 
        if (*pos || size > sizeof(str) - 1)
@@ -139,11 +137,9 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
                ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
                if (!ret) {
                        if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
-                               memset(i2c_output,  0, sizeof(i2c_output));
-                               for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
-                                       sprintf(i2c_output, "%s 0x%X", i2c_output,
-                                               securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
-                               dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is :%s\n", i2c_output);
+                               dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is: %*ph\n",
+                                        TA_SECUREDISPLAY_I2C_BUFFER_SIZE,
+                                        securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf);
                        } else {
                                psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
                        }
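
The %*ph printk extension hex-dumps a small buffer (up to 64 bytes), with the int field width giving the byte count, which is what makes the hand-rolled sprintf loop removable here. A hedged mini-example:

    u8 crc[4] = { 0xde, 0xad, 0xbe, 0xef };

    dev_info(adev->dev, "crc: %*ph\n", (int)sizeof(crc), crc);
    /* prints: crc: de ad be ef */
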
index 7b230bcbf2c6ee7e1f5f93d57f5d2a4110177d87..909d830b513e24184e4c2cc8b3b466de51692d25 100644 (file)
@@ -62,6 +62,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        bp.flags = 0;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        r = amdgpu_bo_create(adev, &bp, &vram_obj);
        if (r) {
index 1c6131489a851ae8d4447d4ffa70b5093de73bc4..3bef0432cac2f7dba196c2e23c64f25fa2a93fea 100644 (file)
@@ -823,15 +823,14 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
-
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+       int r;
 
        /* Allocate an SG array and squash pages into it */
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
@@ -861,13 +860,12 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
        /* double check that we don't free the table twice */
-       if (!ttm->sg->sgl)
+       if (!ttm->sg || !ttm->sg->sgl)
                return;
 
        /* unmap the pages mapped to the device */
@@ -1087,13 +1085,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
 
-       if (!gtt->bound)
-               return;
-
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr)
                amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
 
+       if (!gtt->bound)
+               return;
+
        if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
                return;
 
@@ -1503,7 +1501,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
                                memcpy(buf, &value, bytes);
                        }
                } else {
-                       bytes = cursor.size & 0x3ull;
+                       bytes = cursor.size & ~0x3ULL;
                        amdgpu_device_vram_access(adev, cursor.start,
                                                  (uint32_t *)buf, bytes,
                                                  write);
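
The mask flip fixes an inverted computation: '& 0x3ull' kept only the unaligned tail of the cursor, while the intent was the dword-aligned bulk. A hedged illustration:

    u64 size = 11;
    u64 tail = size & 0x3ull;   /* 3 - what the old code passed on */
    u64 bulk = size & ~0x3ull;  /* 8 - the dword-aligned chunk intended */
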
index a2975c8092a919d96db119c4dae0365141e4f6b0..ea6f99be070bd87501f5abeb9aa88323f888e7a8 100644 (file)
@@ -60,8 +60,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
        }
 
        /* ras init of specific umc version */
-       if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
-               adev->umc.funcs->err_cnt_init(adev);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->err_cnt_init)
+               adev->umc.ras_funcs->err_cnt_init(adev);
 
        return 0;
 
@@ -95,12 +96,12 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 
        kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_count)
-           adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->query_ras_error_count)
+           adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
 
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_address &&
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->query_ras_error_address &&
            adev->umc.max_ras_err_cnt_per_query) {
                err_data->err_addr =
                        kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -116,7 +117,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
-               adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+               adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
        }
 
        /* only uncorrectable error needs gpu reset */
index 18381449365800c70ca8e5855c85af2da94f8454..bbcccf53080dd83c20ac8db513642403671e834b 100644 (file)
 #define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
 
-struct amdgpu_umc_funcs {
+struct amdgpu_umc_ras_funcs {
        void (*err_cnt_init)(struct amdgpu_device *adev);
        int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
        void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
+                                     void *ras_error_status);
        void (*query_ras_error_address)(struct amdgpu_device *adev,
                                        void *ras_error_status);
+};
+
+struct amdgpu_umc_funcs {
        void (*init_registers)(struct amdgpu_device *adev);
 };
 
@@ -59,6 +63,7 @@ struct amdgpu_umc {
        struct ras_common_if *ras_if;
 
        const struct amdgpu_umc_funcs *funcs;
+       const struct amdgpu_umc_ras_funcs *ras_funcs;
 };
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
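The pattern behind the umc hunks above recurs across this series: RAS entry points move out of the generic funcs table into a dedicated ras_funcs table, and every call site checks both the table pointer and the individual hook, so ASICs without RAS support simply leave ras_funcs NULL. A minimal sketch of that guarded dispatch, with toy stand-ins for the real amdgpu structures:

#include <stdio.h>

struct toy_ras_funcs {
        void (*err_cnt_init)(void *dev);
        void (*query_ras_error_count)(void *dev, void *status);
};

struct toy_umc {
        const struct toy_ras_funcs *ras_funcs; /* NULL when RAS is absent */
};

static void toy_umc_ras_init(struct toy_umc *umc, void *dev)
{
        if (umc->ras_funcs && umc->ras_funcs->err_cnt_init)
                umc->ras_funcs->err_cnt_init(dev);
}

static void toy_err_cnt_init(void *dev) { puts("umc error counters armed"); }

static const struct toy_ras_funcs toy_ops = {
        .err_cnt_init = toy_err_cnt_init,
        /* query hook left NULL on purpose: the guarded call skips it */
};

int main(void)
{
        struct toy_umc umc = { .ras_funcs = &toy_ops };

        toy_umc_ras_init(&umc, NULL); /* dispatches */
        umc.ras_funcs = NULL;
        toy_umc_ras_init(&umc, NULL); /* safely a no-op */
        return 0;
}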
index e2ed4689118a9980baef80b677efaf22614a7787..c6dbc08016045d3216fad0eebbf6b7435786e968 100644 (file)
@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                if ((adev->asic_type == CHIP_POLARIS10 ||
                     adev->asic_type == CHIP_POLARIS11) &&
                    (adev->uvd.fw_version < FW_1_66_16))
-                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+                       DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
                                  version_major, version_minor);
        } else {
                unsigned int enc_major, enc_minor, dec_minor;
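Background on the `%hu` → `%u` change above: version_major and version_minor are unsigned int here, and in a varargs call anything narrower would be promoted to int anyway, so `%u` is the matching conversion; `%hu` asks printf to reinterpret the argument as unsigned short and trips `-Wformat` under the W=1 build. Toy illustration:

#include <stdio.h>

int main(void)
{
        unsigned int version_minor = 66;

        printf("%u\n", version_minor);  /* matches the argument's type */
        /* printf("%hu\n", version_minor); -- conversion mismatch that
         * gcc -Wformat (enabled by W=1 builds) reports */
        return 0;
}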
index 1843bf8de0cfba4127d9bed63cac7ca35e8dc1db..bc76cab676974152f6e8d713fca73d6c314fa3df 100644 (file)
@@ -212,6 +212,7 @@ struct amdgpu_vcn_inst {
        void                    *saved_bo;
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+       atomic_t                sched_score;
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
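The new per-instance `sched_score` above is an atomic counter; presumably each queued job bumps the score of the instance it lands on so submissions can be steered to the least loaded VCN instance. A standalone sketch of that selection logic, using C11 atomics in place of the kernel's atomic_t (instance count and helper names are made up):

#include <stdatomic.h>

#define TOY_NUM_INST 2

static atomic_int sched_score[TOY_NUM_INST];

/* pick the instance with the fewest queued jobs and charge it one job */
static int toy_pick_instance(void)
{
        int best = 0, i;

        for (i = 1; i < TOY_NUM_INST; i++)
                if (atomic_load(&sched_score[i]) < atomic_load(&sched_score[best]))
                        best = i;
        atomic_fetch_add(&sched_score[best], 1);
        return best;
}

int main(void)
{
        atomic_store(&sched_score[1], 5); /* instance 1 already busy */
        return toy_pick_instance();       /* chooses idle instance 0 */
}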
index d9ffff8eb41d2f1a1ef8c9909a697910a375bf30..0c9c5255aa429742ce61a22d55f6004a9783c650 100644 (file)
@@ -466,6 +466,8 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
                adev->virt.gim_feature =
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+               adev->virt.reg_access =
+                       ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
 
                break;
        default:
@@ -617,6 +619,14 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                                if (adev->virt.ras_init_done)
                                        amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
                        }
+       } else if (adev->bios != NULL) {
+               adev->virt.fw_reserve.p_pf2vf =
+                       (struct amd_sriov_msg_pf2vf_info_header *)
+                       (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+               amdgpu_virt_read_pf2vf_data(adev);
+
+               return;
        }
 
        if (adev->virt.vf2pf_update_interval_ms != 0) {
index 8dd624c20f895ed8f5d57c17e85eb06e92a5fe0d..383d4bdc3fb53fa028a59523281f99114958017d 100644 (file)
@@ -104,6 +104,17 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
        /* PP ONE VF MODE in GIM */
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
+       /* Indirect Reg Access enabled */
+       AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+};
+
+enum AMDGIM_REG_ACCESS_FLAG {
+       /* Use PSP to program IH_RB_CNTL */
+       AMDGIM_FEATURE_IH_REG_PSP_EN     = (1 << 0),
+       /* Use RLC to program MMHUB regs */
+       AMDGIM_FEATURE_MMHUB_REG_RLC_EN  = (1 << 1),
+       /* Use RLC to program GC regs */
+       AMDGIM_FEATURE_GC_REG_RLC_EN     = (1 << 2),
 };
 
 struct amdgim_pf2vf_info_v1 {
@@ -217,6 +228,7 @@ struct amdgpu_virt {
        bool tdr_debug;
        struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
        bool ras_init_done;
+       uint32_t reg_access;
 
        /* vf2pf message */
        struct delayed_work vf2pf_work;
@@ -238,6 +250,22 @@ struct amdgpu_virt {
 #define amdgpu_sriov_fullaccess(adev) \
 (amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
 
+#define amdgpu_sriov_reg_indirect_en(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))
+
+#define amdgpu_sriov_reg_indirect_ih(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))
+
+#define amdgpu_sriov_reg_indirect_mmhub(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))
+
+#define amdgpu_sriov_reg_indirect_gc(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
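Taken together with the amd_sriov_reg_access_flags union added in amdgv_sriovmsg.h below, the flow is: the host GIM sets capability bits in the flags/all union, amdgpu_virt_read_pf2vf_data() snapshots `.all` into `adev->virt.reg_access`, and each `amdgpu_sriov_reg_indirect_*()` macro tests one bit. A toy reproduction (names simplified; the bitfield-to-bit-0 correspondence assumes the little-endian ABIs amdgpu targets):

#include <assert.h>
#include <stdint.h>

union toy_reg_access_flags {
        struct {
                uint32_t vf_reg_access_ih    : 1;
                uint32_t vf_reg_access_mmhub : 1;
                uint32_t reserved            : 30;
        } flags;
        uint32_t all;
};

#define TOY_IH_REG_PSP_EN (1u << 0)

int main(void)
{
        union toy_reg_access_flags f = { .all = 0 };
        uint32_t reg_access;

        f.flags.vf_reg_access_ih = 1;           /* host side: set a capability bit */
        reg_access = f.all;                     /* guest side: snapshot the word */
        assert(reg_access & TOY_IH_REG_PSP_EN); /* macro-style bit test */
        return 0;
}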
 
index f314e1e269cdff8fa81b993f3a25136769813abf..0ffdf847cad0a2e7c4575be981c8b72666c3c9dd 100644 (file)
@@ -869,6 +869,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
        bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+       bp->bo_ptr_size = sizeof(struct amdgpu_bo);
        if (vm->use_cpu_for_update)
                bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
@@ -2197,8 +2198,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        uint64_t eaddr;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2263,8 +2264,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        int r;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
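Note the mask identity behind the two validation hunks above: in the kernel, `PAGE_MASK` is `~(PAGE_SIZE - 1)`, so `addr & ~PAGE_MASK` isolates the in-page offset bits and a nonzero result means the address is not CPU-page aligned (the old code tested against the GPU page size instead). Toy check for a 4 KiB page:

#include <assert.h>

#define TOY_PAGE_SIZE 4096ULL
#define TOY_PAGE_MASK (~(TOY_PAGE_SIZE - 1))

int main(void)
{
        assert((0x1000ULL & ~TOY_PAGE_MASK) == 0);      /* page aligned */
        assert((0x1004ULL & ~TOY_PAGE_MASK) == 0x4ULL); /* 4 bytes into a page */
        return 0;
}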
@@ -2409,7 +2410,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                        after->start = eaddr + 1;
                        after->last = tmp->last;
                        after->offset = tmp->offset;
-                       after->offset += after->start - tmp->start;
+                       after->offset += (after->start - tmp->start) << PAGE_SHIFT;
                        after->flags = tmp->flags;
                        after->bo_va = tmp->bo_va;
                        list_add(&after->list, &tmp->bo_va->invalids);
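The `<< PAGE_SHIFT` fix above is a unit conversion: a mapping's `start`/`last` are tracked in GPU pages while `offset` is in bytes, so the page delta of the split must be scaled before it is added. Toy arithmetic:

#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12

int main(void)
{
        uint64_t old_start_pg = 0x100, new_start_pg = 0x103;
        uint64_t offset_bytes = 0;

        /* the buggy form added the raw page delta (3); the fix scales it */
        offset_bytes += (new_start_pg - old_start_pg) << TOY_PAGE_SHIFT;
        assert(offset_bytes == 3ULL * 4096ULL); /* 12288 bytes */
        return 0;
}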
@@ -3300,7 +3301,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
        struct amdgpu_bo *root;
        uint64_t value, flags;
        struct amdgpu_vm *vm;
-       long r;
+       int r;
 
        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3350,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                value = 0;
        }
 
+       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       if (r) {
+               pr_debug("failed %d to reserve fence slot\n", r);
+               goto error_unlock;
+       }
+
        r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
                                        addr, flags, value, NULL, NULL,
                                        NULL);
@@ -3360,7 +3367,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 error_unlock:
        amdgpu_bo_unreserve(root);
        if (r < 0)
-               DRM_ERROR("Can't handle page fault (%ld)\n", r);
+               DRM_ERROR("Can't handle page fault (%d)\n", r);
 
 error_unref:
        amdgpu_bo_unref(&root);
index b2fc475ce6f705e5d044fed6f2e5916ef77992cf..592a2dd16493eab4aa8abfaf15ba90c2c0b8efe4 100644 (file)
@@ -52,7 +52,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+       return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
 }
 
 /**
@@ -69,7 +69,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+       return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
 }
 
 /**
@@ -87,8 +87,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       amdgpu_vram_mgr_usage(man));
+       return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
 }
 
 /**
@@ -106,8 +105,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       amdgpu_vram_mgr_vis_usage(man));
+       return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
 }
 
 static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
@@ -119,27 +117,27 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
 
        switch (adev->gmc.vram_vendor) {
        case SAMSUNG:
-               return snprintf(buf, PAGE_SIZE, "samsung\n");
+               return sysfs_emit(buf, "samsung\n");
        case INFINEON:
-               return snprintf(buf, PAGE_SIZE, "infineon\n");
+               return sysfs_emit(buf, "infineon\n");
        case ELPIDA:
-               return snprintf(buf, PAGE_SIZE, "elpida\n");
+               return sysfs_emit(buf, "elpida\n");
        case ETRON:
-               return snprintf(buf, PAGE_SIZE, "etron\n");
+               return sysfs_emit(buf, "etron\n");
        case NANYA:
-               return snprintf(buf, PAGE_SIZE, "nanya\n");
+               return sysfs_emit(buf, "nanya\n");
        case HYNIX:
-               return snprintf(buf, PAGE_SIZE, "hynix\n");
+               return sysfs_emit(buf, "hynix\n");
        case MOSEL:
-               return snprintf(buf, PAGE_SIZE, "mosel\n");
+               return sysfs_emit(buf, "mosel\n");
        case WINBOND:
-               return snprintf(buf, PAGE_SIZE, "winbond\n");
+               return sysfs_emit(buf, "winbond\n");
        case ESMT:
-               return snprintf(buf, PAGE_SIZE, "esmt\n");
+               return sysfs_emit(buf, "esmt\n");
        case MICRON:
-               return snprintf(buf, PAGE_SIZE, "micron\n");
+               return sysfs_emit(buf, "micron\n");
        default:
-               return snprintf(buf, PAGE_SIZE, "unknown\n");
+               return sysfs_emit(buf, "unknown\n");
        }
 }
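All the snprintf conversions above follow the same recipe: in a sysfs show() callback, `sysfs_emit()` already knows the buffer is a single page, bounds the write to PAGE_SIZE, and warns on misuse, so the explicit `PAGE_SIZE` argument disappears. A minimal sketch of the idiom outside amdgpu (the attribute name is made up):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t toy_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        /* sysfs_emit() bounds the write to PAGE_SIZE and warns if buf is
         * not the page-aligned buffer sysfs handed in */
        return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(toy);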
 
index 33f748e5bbfc2c3a3b363bd1869f59b5fa2a4a95..8567d5d773460b66a410cc660dbad38f12bf391d 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/list.h>
 #include "amdgpu.h"
 #include "amdgpu_xgmi.h"
-#include "amdgpu_smu.h"
 #include "amdgpu_ras.h"
 #include "soc15.h"
 #include "df/df_3_6_offset.h"
@@ -217,7 +216,7 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+       return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
 
 }
 
@@ -246,7 +245,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
 
        adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
+       return sysfs_emit(buf, "%u\n", error_count);
 }
 
 
@@ -629,7 +628,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
        return psp_xgmi_terminate(&adev->psp);
 }
 
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
 {
        int r;
        struct ras_ih_if ih_info = {
@@ -643,7 +642,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
-       amdgpu_xgmi_reset_ras_error_count(adev);
+       adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
 
        if (!adev->gmc.xgmi.ras_if) {
                adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -665,7 +664,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
        return r;
 }
 
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
                        adev->gmc.xgmi.ras_if) {
@@ -692,7 +691,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg
        WREG32_PCIE(pcs_status_reg, 0);
 }
 
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
 {
        uint32_t i;
 
@@ -752,8 +751,8 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
        return 0;
 }
 
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
-                                     void *ras_error_status)
+static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+                                            void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        int i;
@@ -802,10 +801,17 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                break;
        }
 
-       amdgpu_xgmi_reset_ras_error_count(adev);
+       adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
 
        err_data->ue_count += ue_cnt;
        err_data->ce_count += ce_cnt;
 
        return 0;
 }
+
+const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
+       .ras_late_init = amdgpu_xgmi_ras_late_init,
+       .ras_fini = amdgpu_xgmi_ras_fini,
+       .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
+       .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+};
index 148560d63554364e7cc1485c57c0fe00724f0440..12969c0830d5ce9d8194bd4974e563864d22ad1f 100644 (file)
@@ -50,6 +50,7 @@ struct amdgpu_pcs_ras_field {
        uint32_t pcs_err_shift;
 };
 
+extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs;
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
 void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
@@ -58,14 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
 int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
 int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
                struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr);
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
-
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
                struct amdgpu_device *bo_adev)
 {
index 5355827ed0ae65b3a84ef862b793aa386bdaf526..1a8f6d4baab24ff3107155822f9d6320cadab652 100644 (file)
@@ -90,11 +90,22 @@ union amd_sriov_msg_feature_flags {
                uint32_t  host_flr_vramlost  : 1;
                uint32_t  mm_bw_management   : 1;
                uint32_t  pp_one_vf_mode     : 1;
-               uint32_t  reserved           : 27;
+               uint32_t  reg_indirect_acc   : 1;
+               uint32_t  reserved           : 26;
        } flags;
        uint32_t      all;
 };
 
+union amd_sriov_reg_access_flags {
+       struct {
+               uint32_t vf_reg_access_ih    : 1;
+               uint32_t vf_reg_access_mmhub : 1;
+               uint32_t vf_reg_access_gc    : 1;
+               uint32_t reserved            : 29;
+       } flags;
+       uint32_t all;
+};
+
 union amd_sriov_msg_os_info {
        struct {
                uint32_t  windows            : 1;
@@ -149,8 +160,10 @@ struct amd_sriov_msg_pf2vf_info {
        /* identification in ROCm SMI */
        uint64_t uuid;
        uint32_t fcn_idx;
+       /* flags which indicate the register access method VF should use */
+       union amd_sriov_reg_access_flags reg_access_flags;
        /* reserved */
-       uint32_t reserved[256-26];
+       uint32_t reserved[256-27];
 };
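The `reserved[256-26]` → `reserved[256-27]` shrink above is how the PF2VF message stays a fixed 256 dwords: one new 32-bit field in, one reserved slot out. A toy compile-time check of that invariant (layout simplified, not the real struct):

#include <stdint.h>

struct toy_pf2vf {
        uint32_t used[27];           /* header + payload incl. the new flags word */
        uint32_t reserved[256 - 27]; /* pad out to the fixed message size */
};

_Static_assert(sizeof(struct toy_pf2vf) == 256 * sizeof(uint32_t),
               "PF2VF message must stay 256 dwords");

int main(void) { return 0; }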
 
 struct amd_sriov_msg_vf2pf_info_header {
index 43b978144b79688a78a73db467bfd5594161fb27..c4bb8eed246d66789bbd387d80711894caa9e582 100644 (file)
@@ -984,10 +984,9 @@ static int cik_sdma_sw_init(void *handle)
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
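The trailing `NULL` that this and many later hunks append to `amdgpu_ring_init()` fills a new final parameter; rings that do not take part in load balancing pass NULL, while presumably the VCN rings pass the per-instance `sched_score` counter added in amdgpu_vcn.h above. The call shape, abbreviated:

        r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, irq_type,
                             AMDGPU_RING_PRIO_DEFAULT, NULL /* no score */);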
index ea825b4f8ee807d538d6e8395da6bdc8805f02f8..d1570a462a51a95088fbbbde4ad27be74f3b0465 100644 (file)
@@ -2896,6 +2896,11 @@ static int dce_v10_0_hw_fini(void *handle)
 static int dce_v10_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2920,8 +2925,10 @@ static int dce_v10_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v10_0_is_idle(void *handle)
index a360a6dec1988c5909449ab11f95cc5ae0c341b1..18a7b3bd633b5d06198b129ceb25e7ef3677a3ef 100644 (file)
@@ -3026,6 +3026,11 @@ static int dce_v11_0_hw_fini(void *handle)
 static int dce_v11_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -3050,8 +3055,10 @@ static int dce_v11_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v11_0_is_idle(void *handle)
index ef124ac853b60f3b03da2dbe9b7b2ade7ee522ae..dbcb09cf83e63b89c46224a619d902c0e963e6f4 100644 (file)
@@ -2769,7 +2769,11 @@ static int dce_v6_0_hw_fini(void *handle)
 static int dce_v6_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
 
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
 
@@ -2793,8 +2797,10 @@ static int dce_v6_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v6_0_is_idle(void *handle)
index c986501838285fd354789d0753a016799f0b6b23..b200b9e722d97887bb20472fdb635e3289af2433 100644 (file)
@@ -2795,6 +2795,11 @@ static int dce_v8_0_hw_fini(void *handle)
 static int dce_v8_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2819,8 +2824,10 @@ static int dce_v8_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v8_0_is_idle(void *handle)
index 9810af712cc0d1d01164b1255a0db093fa33e6f9..5c11144da0513dc4a92064e6d50e3ad8bb7d95b2 100644 (file)
@@ -39,6 +39,7 @@
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_display.h"
 
 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
@@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
 
 static int dce_virtual_suspend(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
        return dce_virtual_hw_fini(handle);
 }
 
 static int dce_virtual_resume(void *handle)
 {
-       return dce_virtual_hw_init(handle);
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = dce_virtual_hw_init(handle);
+       if (r)
+               return r;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_virtual_is_idle(void *handle)
index 44109a6b8f4427060f9be5c5010f11df1083dce4..0d8459d63bac172a3710f6448e233657c2676fe7 100644 (file)
@@ -205,7 +205,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
                        count++;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%i\n", count);
+       return sysfs_emit(buf, "%i\n", count);
 }
 
 /* device attr for available perfmon counters */
index 45d1172b7bff93e8ef38e87859467c5944e93e0c..196d9d2a2e47460d3c6aa6f57957031fef1141ed 100644 (file)
@@ -29,7 +29,6 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
 #include "nv.h"
 #include "nvd.h"
 
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
 
+#define GFX_RLCG_GC_WRITE_OLD  (0x8 << 28)
+#define GFX_RLCG_GC_WRITE      (0x0 << 28)
+#define GFX_RLCG_GC_READ       (0x1 << 28)
+#define GFX_RLCG_MMHUB_WRITE   (0x2 << 28)
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -1419,38 +1423,127 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
 };
 
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
+{
+       /* always programmed by rlcg, only for gc */
+       if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
+               if (!amdgpu_sriov_reg_indirect_gc(adev))
+                       *flag = GFX_RLCG_GC_WRITE_OLD;
+               else
+                       *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+
+               return true;
+       }
+
+       /* currently supports gc read/write, mmhub write */
+       if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
+           offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
+               if (amdgpu_sriov_reg_indirect_gc(adev))
+                       *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+               else
+                       return false;
+       } else {
+               if (amdgpu_sriov_reg_indirect_mmhub(adev))
+                       *flag = GFX_RLCG_MMHUB_WRITE;
+               else
+                       return false;
+       }
+
+       return true;
+}
+
+static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
 {
        static void *scratch_reg0;
        static void *scratch_reg1;
+       static void *scratch_reg2;
+       static void *scratch_reg3;
        static void *spare_int;
+       static uint32_t grbm_cntl;
+       static uint32_t grbm_idx;
        uint32_t i = 0;
        uint32_t retries = 50000;
+       u32 ret = 0;
+
+       scratch_reg0 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
+       scratch_reg1 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
+       scratch_reg2 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
+       scratch_reg3 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
+       spare_int = adev->rmmio +
+                   (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+       grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+       grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+       if (offset == grbm_cntl || offset == grbm_idx) {
+               if (offset  == grbm_cntl)
+                       writel(v, scratch_reg2);
+               else if (offset == grbm_idx)
+                       writel(v, scratch_reg3);
+
+               writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+       } else {
+               writel(v, scratch_reg0);
+               writel(offset | flag, scratch_reg1);
+               writel(1, spare_int);
+               for (i = 0; i < retries; i++) {
+                       u32 tmp;
+
+                       tmp = readl(scratch_reg1);
+                       if (!(tmp & flag))
+                               break;
 
-       scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
-       scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
-       spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+                       udelay(10);
+               }
 
-       if (amdgpu_sriov_runtime(adev)) {
-               pr_err("shouldn't call rlcg write register during runtime\n");
-               return;
+               if (i >= retries)
+                       pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
        }
 
-       writel(v, scratch_reg0);
-       writel(offset | 0x80000000, scratch_reg1);
-       writel(1, spare_int);
-       for (i = 0; i < retries; i++) {
-               u32 tmp;
+       ret = readl(scratch_reg0);
 
-               tmp = readl(scratch_reg1);
-               if (!(tmp & 0x80000000))
-                       break;
+       return ret;
+}
+
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+{
+       uint32_t rlcg_flag;
 
-               udelay(10);
+       if (amdgpu_sriov_fullaccess(adev) &&
+           gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+               gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
+
+               return;
        }
+       if (flag & AMDGPU_REGS_NO_KIQ)
+               WREG32_NO_KIQ(offset, value);
+       else
+               WREG32(offset, value);
+}
 
-       if (i >= retries)
-               pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+{
+       uint32_t rlcg_flag;
+
+       if (amdgpu_sriov_fullaccess(adev) &&
+           gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+               return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+       if (flag & AMDGPU_REGS_NO_KIQ)
+               return RREG32_NO_KIQ(offset);
+       else
+               return RREG32(offset);
+
+       return 0;
 }
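The new gfx_v10_rlcg_rw() above implements a mailbox handshake with the RLC firmware: the payload goes into SCRATCH_REG0, the register offset OR'd with a request flag into SCRATCH_REG1, RLC_SPARE_INT rings the doorbell, and the driver polls until the firmware clears the flag, then reads the result back from SCRATCH_REG0 (GRBM_GFX_CNTL/INDEX are special-cased via SCRATCH_REG2/3 and also written directly). A self-contained toy of that protocol; the MMIO helpers and register indices are stand-ins, and since nothing clears the flag here the poll times out by design:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { SCRATCH_REG0, SCRATCH_REG1, RLC_SPARE_INT, TOY_NREGS };
static uint32_t toy_regs[TOY_NREGS];

static void mmio_write(int reg, uint32_t v) { toy_regs[reg] = v; }
static uint32_t mmio_read(int reg) { return toy_regs[reg]; }

static int toy_rlcg_rw(uint32_t offset, uint32_t value, uint32_t flag,
                       uint32_t *result)
{
        int i, retries = 50000;

        mmio_write(SCRATCH_REG0, value);         /* payload */
        mmio_write(SCRATCH_REG1, offset | flag); /* request; flag doubles as a busy bit */
        mmio_write(RLC_SPARE_INT, 1);            /* doorbell to the RLC */

        for (i = 0; i < retries; i++)            /* firmware clears flag when done */
                if (!(mmio_read(SCRATCH_REG1) & flag))
                        break;
        if (i >= retries)
                return -ETIMEDOUT;

        *result = mmio_read(SCRATCH_REG0);
        return 0;
}

int main(void)
{
        uint32_t out;

        if (toy_rlcg_rw(0x1234, 0xdeadbeef, 1u << 28, &out) == -ETIMEDOUT)
                puts("request posted; no firmware behind the toy to complete it");
        return 0;
}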
 
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -4459,9 +4552,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
        sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
        irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type,
-                            AMDGPU_RING_PRIO_DEFAULT);
+       r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
        return 0;
@@ -4495,8 +4587,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type, hw_prio);
+       r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                            hw_prio, NULL);
        if (r)
                return r;
 
@@ -7172,16 +7264,10 @@ static int gfx_v10_0_hw_init(void *handle)
                 * loaded firstly, so in direct type, it has to load smc ucode
                 * here before rlc.
                 */
-               if (adev->smu.ppt_funcs != NULL && !(adev->flags & AMD_IS_APU)) {
-                       r = smu_load_microcode(&adev->smu);
+               if (!(adev->flags & AMD_IS_APU)) {
+                       r = amdgpu_pm_load_smu_firmware(adev, NULL);
                        if (r)
                                return r;
-
-                       r = smu_check_fw_status(&adev->smu);
-                       if (r) {
-                               pr_err("SMC firmware status is not correct\n");
-                               return r;
-                       }
                }
                gfx_v10_0_disable_gpa_mode(adev);
        }
@@ -7892,6 +7978,7 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
        .start = gfx_v10_0_rlc_start,
        .update_spm_vmid = gfx_v10_0_update_spm_vmid,
        .rlcg_wreg = gfx_v10_rlcg_wreg,
+       .rlcg_rreg = gfx_v10_rlcg_rreg,
        .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
 
index ca74638dec9b7a4379642f6684014354808f0f0e..3a8d52a54873f64e9f2fd1f2575bed43decc8c66 100644 (file)
@@ -3114,7 +3114,7 @@ static int gfx_v6_0_sw_init(void *handle)
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
@@ -3137,7 +3137,7 @@ static int gfx_v6_0_sw_init(void *handle)
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->gfx.eop_irq, irq_type,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index a368724c3dfcd079bd89e72639ba260d69226ce0..c35fdd2ef2d4daf5d1df0142ffb86ada692bb894 100644 (file)
@@ -1877,7 +1877,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
        mutex_unlock(&adev->srbm_mutex);
 
        /* Initialize all compute VMIDs to have no GDS, GWS, or OA
-          acccess. These should be enabled by FW for target VMIDs. */
+          access. These should be enabled by FW for target VMIDs. */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
                WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -2058,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  *
  * Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
  * is not used by default on newer asics (r6xx+).  On newer asics,
  * memory buffers are used for fences rather than scratch regs.
  */
@@ -2172,7 +2172,7 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
@@ -2215,7 +2215,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2245,14 +2245,14 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
  *
  * @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
  * @ib: amdgpu indirect buffer object
  * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
  *
  * Emits an DE (drawing engine) or CE (constant engine) IB
  * on the gfx ring.  IBs are usually generated by userspace
  * acceleration drivers and submitted to the kernel for
- * sheduling on the ring.  This function schedules the IB
+ * scheduling on the ring.  This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -2402,7 +2402,7 @@ err1:
 
 /*
  * CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
  *
  * GFX
  * Gfx consists of a single ring and can process both gfx jobs and
@@ -2630,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
        ring->wptr = 0;
        WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
 
-       /* set the wb address wether it's enabled or not */
+       /* set the wb address whether it's enabled or not */
        rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
        WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
        WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2985,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
        mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-       /* set the wb address wether it's enabled or not */
+       /* set the wb address whether it's enabled or not */
        wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
        mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -3198,7 +3198,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 /**
  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
  *
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
  *
  * Sync the command pipeline with the PFP. E.g. wait for everything
  * to be completed.
@@ -3220,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 4); /* poll interval */
 
        if (usepfp) {
-               /* synce CE with ME to prevent CE fetch CEIB before context switch done */
+               /* sync CE with ME to prevent CE fetch CEIB before context switch done */
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -4438,7 +4438,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
                             &adev->gfx.eop_irq, irq_type,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -4512,7 +4512,7 @@ static int gfx_v7_0_sw_init(void *handle)
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 4d45844a1d92b1f023e42859a46ffb5cf40126a8..c26e0605946622900de485d5c146fbac292570c3 100644 (file)
@@ -1927,8 +1927,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type, hw_prio);
+       r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                            hw_prio, NULL);
        if (r)
                return r;
 
@@ -2033,7 +2033,7 @@ static int gfx_v8_0_sw_init(void *handle)
 
                r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 1393ccae74180149559c4736515884fb365f3600..06811a1f462561479aa0711b910de5942eb4835b 100644 (file)
@@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
 {
        static void *scratch_reg0;
        static void *scratch_reg1;
@@ -787,6 +787,20 @@ static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
 
 }
 
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+       if (amdgpu_sriov_fullaccess(adev)) {
+               gfx_v9_0_rlcg_rw(adev, offset, v, flag);
+
+               return;
+       }
+
+       if (flag & AMDGPU_REGS_NO_KIQ)
+               WREG32_NO_KIQ(offset, v);
+       else
+               WREG32(offset, v);
+}
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -2089,45 +2103,22 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
-       .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
-       .select_se_sh = &gfx_v9_0_select_se_sh,
-       .read_wave_data = &gfx_v9_0_read_wave_data,
-       .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
-       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+        .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+        .select_se_sh = &gfx_v9_0_select_se_sh,
+        .read_wave_data = &gfx_v9_0_read_wave_data,
+        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+        .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+        .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+};
+
+static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
+       .ras_late_init = amdgpu_gfx_ras_late_init,
+       .ras_fini = amdgpu_gfx_ras_fini,
        .ras_error_inject = &gfx_v9_0_ras_error_inject,
        .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
        .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
 };
 
-static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
-       .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
-       .select_se_sh = &gfx_v9_0_select_se_sh,
-       .read_wave_data = &gfx_v9_0_read_wave_data,
-       .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
-       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
-       .ras_error_inject = &gfx_v9_4_ras_error_inject,
-       .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
-       .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
-       .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
-};
-
-static const struct amdgpu_gfx_funcs gfx_v9_4_2_gfx_funcs = {
-       .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
-       .select_se_sh = &gfx_v9_0_select_se_sh,
-       .read_wave_data = &gfx_v9_0_read_wave_data,
-       .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
-       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
-       .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
-       .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
-       .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
-       .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
-       .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
-       .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
-};
-
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
        u32 gb_addr_config;
@@ -2154,6 +2145,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                DRM_INFO("fix gfx.config for vega12\n");
                break;
        case CHIP_VEGA20:
+               adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2179,7 +2171,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                        gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_ARCTURUS:
-               adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
+               adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2200,7 +2192,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                gb_addr_config |= 0x22010042;
                break;
        case CHIP_ALDEBARAN:
-               adev->gfx.funcs = &gfx_v9_4_2_gfx_funcs;
+               adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2286,8 +2278,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       return amdgpu_ring_init(adev, ring, 1024,
-                               &adev->gfx.eop_irq, irq_type, hw_prio);
+       return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                               hw_prio, NULL);
 }
 
 static int gfx_v9_0_sw_init(void *handle)
@@ -2376,10 +2368,9 @@ static int gfx_v9_0_sw_init(void *handle)
                        sprintf(ring->name, "gfx_%d", i);
                ring->use_doorbell = true;
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq,
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
@@ -2434,7 +2425,9 @@ static int gfx_v9_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_gfx_ras_fini(adev);
+       if (adev->gfx.ras_funcs &&
+           adev->gfx.ras_funcs->ras_fini)
+               adev->gfx.ras_funcs->ras_fini(adev);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4025,8 +4018,14 @@ static int gfx_v9_0_hw_fini(void *handle)
        }
 
        gfx_v9_0_cp_enable(adev, false);
-       adev->gfx.rlc.funcs->stop(adev);
 
+       /* Skip suspend with A+A reset */
+       if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
+               dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+               return 0;
+       }
+
+       adev->gfx.rlc.funcs->stop(adev);
        return 0;
 }
 
@@ -4747,12 +4746,16 @@ static int gfx_v9_0_ecc_late_init(void *handle)
        if (r)
                return r;
 
-       r = amdgpu_gfx_ras_late_init(adev);
-       if (r)
-               return r;
+       if (adev->gfx.ras_funcs &&
+           adev->gfx.ras_funcs->ras_late_init) {
+               r = adev->gfx.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
 
-       if (adev->gfx.funcs->enable_watchdog_timer)
-               adev->gfx.funcs->enable_watchdog_timer(adev);
+       if (adev->gfx.ras_funcs &&
+           adev->gfx.ras_funcs->enable_watchdog_timer)
+               adev->gfx.ras_funcs->enable_watchdog_timer(adev);
 
        return 0;
 }
index bc699d680ce8bde40d892d3a4d0263100834ec29..830080ff90d85829df22807cc8cf876d2b5f98dd 100644 (file)
@@ -863,8 +863,8 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status)
+static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+                                         void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0, ded_count = 0;
@@ -906,7 +906,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
 {
        int i, j, k;
 
@@ -971,7 +971,8 @@ void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
 }
 
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+                                    void *inject_if)
 {
        struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
        int ret;
@@ -996,7 +997,7 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
 static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
        { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
 
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
 {
        uint32_t i, j;
        uint32_t reg_value;
@@ -1021,3 +1022,12 @@ void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
        gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 }
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
+        .ras_late_init = amdgpu_gfx_ras_late_init,
+        .ras_fini = amdgpu_gfx_ras_fini,
+        .ras_error_inject = &gfx_v9_4_ras_error_inject,
+        .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+        .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+        .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
index 875f18473a9816dc6062916e5fd0272e9beb624b..bdd16b568021c1135c36a2e3afc793e4fcfd6ad2 100644 (file)
 #ifndef __GFX_V9_4_H__
 #define __GFX_V9_4_H__
 
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
-
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status);
-
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
-                                    void *inject_if);
-
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
-
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
 
 #endif /* __GFX_V9_4_H__ */
index 2e94998c98120904b1646b35f82dd44a6b84b429..9ca76a3ac38cc15470d01e04949cb0d7aa087f96 100644 (file)
@@ -1283,4 +1283,15 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
        }
        gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
-}
\ No newline at end of file
+}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
+       .ras_late_init = amdgpu_gfx_ras_late_init,
+       .ras_fini = amdgpu_gfx_ras_fini,
+       .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+       .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+       .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+       .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
+};
index c143d178ef9803c98ec41b9d08f4e64d8a640443..81c5833b6b9f5e9f4b9b278034b1106736addbf2 100644 (file)
@@ -30,11 +30,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
                                      uint32_t die_id);
 void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
 
-void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev);
-int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if);
-void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev);
-int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status);
-void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev);
-void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs;
+
 #endif /* __GFX_V9_4_2_H__ */
index 33e54eed2eec40513669654a43e659ce4a032db9..2bfd620576f20637b25af7a8f54070c2fba20940 100644 (file)
@@ -655,7 +655,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
                adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
-               adev->umc.funcs = &umc_v8_7_funcs;
+               adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
                break;
        default:
                break;
index 468acc06b681fe819f4a8a2e1a137ad2cd4ececb..c82d82da2c7395cfb5b503925634574597ed1d17 100644 (file)
@@ -653,7 +653,8 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 
-       if (!amdgpu_sriov_vf(adev)) {
+       if (!amdgpu_sriov_vf(adev) &&
+           !adev->gmc.xgmi.connected_to_cpu) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
        }
@@ -1155,7 +1156,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-               adev->umc.funcs = &umc_v6_1_funcs;
+               adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
                break;
        case CHIP_ARCTURUS:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1163,7 +1164,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-               adev->umc.funcs = &umc_v6_1_funcs;
+               adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
                break;
        default:
                break;
@@ -1185,6 +1186,24 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
        }
 }
 
+static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+               adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
+               break;
+       case CHIP_ARCTURUS:
+               adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
+               break;
+       case CHIP_ALDEBARAN:
+               adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
+               break;
+       default:
+               /* mmhub ras is not available */
+               break;
+       }
+}
+
 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 {
        adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1194,12 +1213,6 @@ static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v9_0_set_gmc_funcs(adev);
-       gmc_v9_0_set_irq_funcs(adev);
-       gmc_v9_0_set_umc_funcs(adev);
-       gmc_v9_0_set_mmhub_funcs(adev);
-       gmc_v9_0_set_gfxhub_funcs(adev);
-
        if (adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_ARCTURUS)
                adev->gmc.xgmi.supported = true;
@@ -1208,7 +1221,14 @@ static int gmc_v9_0_early_init(void *handle)
                adev->gmc.xgmi.supported = true;
                adev->gmc.xgmi.connected_to_cpu =
                        adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
-        }
+       }
+
+       gmc_v9_0_set_gmc_funcs(adev);
+       gmc_v9_0_set_irq_funcs(adev);
+       gmc_v9_0_set_umc_funcs(adev);
+       gmc_v9_0_set_mmhub_funcs(adev);
+       gmc_v9_0_set_mmhub_ras_funcs(adev);
+       gmc_v9_0_set_gfxhub_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
@@ -1240,8 +1260,9 @@ static int gmc_v9_0_late_init(void *handle)
                }
        }
 
-       if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
-               adev->mmhub.funcs->reset_ras_error_count(adev);
+       if (adev->mmhub.ras_funcs &&
+           adev->mmhub.ras_funcs->reset_ras_error_count)
+               adev->mmhub.ras_funcs->reset_ras_error_count(adev);
 
        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
@@ -1506,7 +1527,8 @@ static int gmc_v9_0_sw_init(void *handle)
        if (r)
                return r;
 
-       if (!amdgpu_sriov_vf(adev)) {
+       if (!amdgpu_sriov_vf(adev) &&
+           !adev->gmc.xgmi.connected_to_cpu) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);
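A recurring pattern in this series: each IP block's RAS hooks move out of its general funcs table into a dedicated ras_funcs table, and callers NULL-check both the table and the hook before dispatching. A minimal sketch of the idea, assuming the amdgpu_mmhub_ras_funcs layout implied by the mmhub initializers further down in this diff:

    /* Sketch only: layout inferred from the initializers in this series. */
    struct amdgpu_mmhub_ras_funcs {
            int  (*ras_late_init)(struct amdgpu_device *adev);
            void (*ras_fini)(struct amdgpu_device *adev);
            void (*query_ras_error_count)(struct amdgpu_device *adev,
                                          void *ras_error_status);
            void (*reset_ras_error_count)(struct amdgpu_device *adev);
            void (*query_ras_error_status)(struct amdgpu_device *adev);
    };

    /* ASICs without RAS leave adev->mmhub.ras_funcs NULL (the default:
     * case above), so the guarded dispatch needs no per-ASIC stubs. */
    if (adev->mmhub.ras_funcs &&
        adev->mmhub.ras_funcs->reset_ras_error_count)
            adev->mmhub.ras_funcs->reset_ras_error_count(adev);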
index 7332a320ede87c881db974fabb28a29c9f9fde4c..9360204da7fb7b5c39619a67f7c1af4dac70ecce 100644 (file)
@@ -487,7 +487,7 @@ int jpeg_v1_0_sw_init(void *handle)
        ring = &adev->jpeg.inst->ring_dec;
        sprintf(ring->name, "jpeg_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
-                            0, AMDGPU_RING_PRIO_DEFAULT);
+                            0, AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
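Every amdgpu_ring_init() call in this series gains a trailing argument. The new prototype is not part of this excerpt, but the call sites imply it; a sketch, with the parameter name taken from the VCN 3.0 call sites below:

    /* Inferred prototype: passing NULL keeps the old behavior, while
     * VCN 3.0 passes a per-instance atomic so the scheduler can balance
     * load across asymmetric engines. */
    int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                         unsigned int max_dw, struct amdgpu_irq_src *irq_src,
                         unsigned int irq_type, unsigned int hw_prio,
                         atomic_t *sched_score);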
 
index 3b22953aa62e1a071b104b79cf38e0e98bc0cf55..de5abceced0dd28ac92cacdf9a424a8388860de2 100644 (file)
@@ -108,7 +108,7 @@ static int jpeg_v2_0_sw_init(void *handle)
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "jpeg_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
-                            0, AMDGPU_RING_PRIO_DEFAULT);
+                            0, AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 072774ae16bd48c4fe571b1dd3b6c42ea9016277..83531997aeba9056bf0e802decdd9f5e254c01a7 100644 (file)
@@ -115,7 +115,7 @@ static int jpeg_v2_5_sw_init(void *handle)
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
                sprintf(ring->name, "jpeg_dec_%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
-                                    0, AMDGPU_RING_PRIO_DEFAULT);
+                                    0, AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
 
index e8fbb2a0de3409b62ca5f0f4e5a38a515cc7ad0f..de5dfcfb385919985f983bf7a2c9b90da5fbbfa5 100644 (file)
@@ -94,7 +94,7 @@ static int jpeg_v3_0_sw_init(void *handle)
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "jpeg_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 7f30629f21a21dd3b90f35089eb5f27ab9f3172f..a7ec4ac89da5c7cba0815b633c5b364884588cdb 100644 (file)
@@ -848,7 +848,8 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
        ring->no_scheduler = true;
        sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
-       return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
+       return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+                               AMDGPU_RING_PRIO_DEFAULT, NULL);
 }
 
 static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
index d7b39c07de2054c38d56830f9207c87e790c0f3b..aa9be5612c8908d62f6159605ee398c317c7289e 100644 (file)
@@ -776,10 +776,14 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .ras_fini = amdgpu_mmhub_ras_fini,
        .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
        .get_fb_location = mmhub_v1_0_get_fb_location,
        .init = mmhub_v1_0_init,
        .gart_enable = mmhub_v1_0_gart_enable,
index d77f5b65a6186d5cadbe8968ecea88e41a753a60..4661b094e00784d907f0efa22097fdb1a6452227 100644 (file)
@@ -24,5 +24,6 @@
 #define __MMHUB_V1_0_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs;
 
 #endif
index ae7d8a1738a3628c22ab6d50e948252bdc1f0a18..7977a7879b321d17b6f4ee1749ca76fb3c70915f 100644 (file)
@@ -1313,10 +1313,15 @@ static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .ras_fini = amdgpu_mmhub_ras_fini,
        .query_ras_error_count = mmhub_v1_7_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
+       .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
        .get_fb_location = mmhub_v1_7_get_fb_location,
        .init = mmhub_v1_7_init,
        .gart_enable = mmhub_v1_7_gart_enable,
@@ -1325,5 +1330,4 @@ const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
        .set_clockgating = mmhub_v1_7_set_clockgating,
        .get_clockgating = mmhub_v1_7_get_clockgating,
        .setup_vm_pt_regs = mmhub_v1_7_setup_vm_pt_regs,
-       .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
 };
index bf2fbeb172d164f82f2854a8339a6e4280483e3c..a7f9dfc2469725bb6231e8855a5ef9a826cf64a9 100644 (file)
@@ -24,5 +24,6 @@
 #define __MMHUB_V1_7_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs;
 
 #endif
index f107385faba25c41c7f4b5f0f84bff4d63b6deda..da7edd1ed6b27c17cc1cef58421ef1ef43884b8d 100644 (file)
@@ -689,7 +689,6 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
        .init = mmhub_v2_0_init,
        .gart_enable = mmhub_v2_0_gart_enable,
        .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
index ab9be5ad5a5fd102f20eaf7581007e2b8827f275..1141c37432f065b07f7e47292e3a3581252fb6dd 100644 (file)
@@ -616,7 +616,6 @@ static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
        .init = mmhub_v2_3_init,
        .gart_enable = mmhub_v2_3_gart_enable,
        .set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
index 4a31737b6bb0b488180b496b207f4b4f45c3eda7..0cffa820ea6e8ff49bc34f5c7a7b85c93023911a 100644 (file)
@@ -1652,10 +1652,15 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .ras_fini = amdgpu_mmhub_ras_fini,
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
        .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+       .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .get_fb_location = mmhub_v9_4_get_fb_location,
        .init = mmhub_v9_4_init,
        .gart_enable = mmhub_v9_4_gart_enable,
@@ -1664,5 +1669,4 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .set_clockgating = mmhub_v9_4_set_clockgating,
        .get_clockgating = mmhub_v9_4_get_clockgating,
        .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
-       .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
 };
index 92404a8f66f3c425240d1992e19bb65aac7ed299..90436efa92ef28c5a488b9bea0a32024a48215d0 100644 (file)
@@ -24,5 +24,6 @@
 #define __MMHUB_V9_4_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs;
 
 #endif
index c477f8972d5dfe2ea36e06945db1bde6d77a3460..af44aad781716c28e2b214207e4140167f80b39e 100644 (file)
@@ -557,6 +557,16 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
                       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
 }
 
+const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
+       .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+       .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+       .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+       .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+       .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+       .ras_late_init = amdgpu_nbio_ras_late_init,
+       .ras_fini = amdgpu_nbio_ras_fini,
+};
+
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
@@ -577,10 +587,4 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
-       .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
-       .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
-       .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
-       .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
-       .query_ras_error_count = nbio_v7_4_query_ras_error_count,
-       .ras_late_init = amdgpu_nbio_ras_late_init,
 };
index b1ac828727526367bfa3fc2313c1dfb63ec15a49..b8216581ec8d3fcf6cc6b60ff2fe072e0534204e 100644 (file)
@@ -28,5 +28,6 @@
 
 extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
+extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
 
 #endif
index 5846eac292c3f515ac748ec8be4c74f40a6397e8..46d4bbabce75bdfcada193a983457b7ecebc42c9 100644 (file)
@@ -34,7 +34,6 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
 #include "atom.h"
 #include "amd_pcie.h"
 
@@ -516,21 +515,9 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
        return ret;
 }
 
-static bool nv_asic_supports_baco(struct amdgpu_device *adev)
-{
-       struct smu_context *smu = &adev->smu;
-
-       if (smu_baco_is_support(smu))
-               return true;
-       else
-               return false;
-}
-
 static enum amd_reset_method
 nv_asic_reset_method(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
-
        if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
            amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
            amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
@@ -549,7 +536,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
        case CHIP_DIMGREY_CAVEFISH:
                return AMD_RESET_METHOD_MODE1;
        default:
-               if (smu_baco_is_support(smu))
+               if (amdgpu_dpm_is_baco_supported(adev))
                        return AMD_RESET_METHOD_BACO;
                else
                        return AMD_RESET_METHOD_MODE1;
@@ -559,11 +546,6 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 static int nv_asic_reset(struct amdgpu_device *adev)
 {
        int ret = 0;
-       struct smu_context *smu = &adev->smu;
-
-       /* skip reset on vangogh for now */
-       if (adev->asic_type == CHIP_VANGOGH)
-               return 0;
 
        switch (nv_asic_reset_method(adev)) {
        case AMD_RESET_METHOD_PCI:
@@ -572,13 +554,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
                break;
        case AMD_RESET_METHOD_BACO:
                dev_info(adev->dev, "BACO reset\n");
-
-               ret = smu_baco_enter(smu);
-               if (ret)
-                       return ret;
-               ret = smu_baco_exit(smu);
-               if (ret)
-                       return ret;
+               ret = amdgpu_dpm_baco_reset(adev);
                break;
        case AMD_RESET_METHOD_MODE2:
                dev_info(adev->dev, "MODE2 reset\n");
@@ -986,7 +962,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
        .need_full_reset = &nv_need_full_reset,
        .need_reset_on_init = &nv_need_reset_on_init,
        .get_pcie_replay_count = &nv_get_pcie_replay_count,
-       .supports_baco = &nv_asic_supports_baco,
+       .supports_baco = &amdgpu_dpm_is_baco_supported,
        .pre_asic_init = &nv_pre_asic_init,
        .update_umd_stable_pstate = &nv_update_umd_stable_pstate,
        .query_video_codecs = &nv_query_video_codecs,
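With the SMU internals gone from nv.c, both the BACO support query and the reset sequence go through the generic PM API. A sketch of the resulting flow, using only the two helpers visible at the call sites above:

    /* amdgpu_dpm_baco_reset() wraps the enter/exit pair that the old
     * smu_baco_enter()/smu_baco_exit() calls spelled out by hand. */
    if (amdgpu_dpm_is_baco_supported(adev)) {
            dev_info(adev->dev, "BACO reset\n");
            ret = amdgpu_dpm_baco_reset(adev);
    }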
index a41b054fe0ba16b9f10faea3300b966c26d46e35..dd4d65f7e0f00d470cd2b90294a427f1a3a55616 100644 (file)
@@ -102,6 +102,21 @@ enum psp_gfx_cmd_id
     /* IDs upto 0x1F are reserved for older programs (Raven, Vega 10/12/20) */
     GFX_CMD_ID_LOAD_TOC           = 0x00000020,   /* Load TOC and obtain TMR size */
     GFX_CMD_ID_AUTOLOAD_RLC       = 0x00000021,   /* Indicates all graphics fw loaded, start RLC autoload */
+    GFX_CMD_ID_BOOT_CFG           = 0x00000022,   /* Boot Config */
+};
+
+/* PSP boot config sub-commands */
+enum psp_gfx_boot_config_cmd
+{
+    BOOTCFG_CMD_SET         = 1, /* Set boot configuration settings */
+    BOOTCFG_CMD_GET         = 2, /* Get boot configuration settings */
+    BOOTCFG_CMD_INVALIDATE  = 3  /* Reset current boot configuration settings to VBIOS defaults */
+};
+
+/* PSP boot config bitmask values */
+enum psp_gfx_boot_config
+{
+    BOOT_CONFIG_GECC = 0x1,
 };
 
 /* Command to load Trusted Application binary into PSP OS. */
@@ -273,6 +288,15 @@ struct psp_gfx_cmd_load_toc
     uint32_t        toc_size;               /* FW buffer size in bytes */
 };
 
+/* Dynamic boot configuration */
+struct psp_gfx_cmd_boot_cfg
+{
+    uint32_t                        timestamp;            /* calendar time as number of seconds */
+    enum psp_gfx_boot_config_cmd    sub_cmd;              /* sub-command indicating how to process command data */
+    uint32_t                        boot_config;          /* dynamic boot configuration bitmask */
+    uint32_t                        boot_config_valid;    /* dynamic boot configuration valid bits bitmask */
+};
+
 /* All GFX ring buffer commands. */
 union psp_gfx_commands
 {
@@ -285,6 +309,7 @@ union psp_gfx_commands
     struct psp_gfx_cmd_reg_prog       cmd_setup_reg_prog;
     struct psp_gfx_cmd_setup_tmr        cmd_setup_vmr;
     struct psp_gfx_cmd_load_toc         cmd_load_toc;
+    struct psp_gfx_cmd_boot_cfg         boot_cfg;
 };
 
 struct psp_gfx_uresp_reserved
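The new GFX_CMD_ID_BOOT_CFG command carries a psp_gfx_cmd_boot_cfg payload. A hypothetical sketch of a SET request, using only the fields and enums defined above (the surrounding command-buffer submission is assumed, and GECC presumably gates GDDR ECC):

    struct psp_gfx_cmd_boot_cfg boot_cfg = {
            .sub_cmd           = BOOTCFG_CMD_SET,
            .boot_config       = BOOT_CONFIG_GECC,
            .boot_config_valid = BOOT_CONFIG_GECC, /* only this bit is meaningful */
    };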
index c325d6f53a71eed8932bb40c8d42878e4a262982..589410c32d095a47e3f5993d4e468622c8305e9a 100644 (file)
@@ -598,7 +598,7 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
 }
 
 /*
- * save and restore proces
+ * save and restore process
  */
 static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 {
@@ -661,9 +661,9 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 
        if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
                /*
-                * Long traing will encroach certain mount of bottom VRAM,
-                * saving the content of this bottom VRAM to system memory
-                * before training, and restoring it after training to avoid
+                * Long training will encroach a certain amount on the bottom of VRAM;
+                * save the content from the bottom of VRAM to system memory
+                * before training, and restore it after training to avoid
                 * VRAM corruption.
                 */
                sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
index eb5dc6c5b46ee0f746d0a4609a369d08204d9694..9f0dda040ec8823fc57f1c203830e48e6a616a43 100644 (file)
@@ -876,12 +876,10 @@ static int sdma_v2_4_sw_init(void *handle)
                ring->ring_obj = NULL;
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index ad308d8c6d30843b44cc1c839e093198bb0cba89..135727b59c41e8f37a5d93ed53759ad2f423bf3f 100644 (file)
@@ -1160,12 +1160,10 @@ static int sdma_v3_0_sw_init(void *handle)
                }
 
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 98b7db516438931656236f29db54fa893804e2e1..5715be6770ecc0314e0c40d71b9412a7771d474a 100644 (file)
@@ -1968,7 +1968,7 @@ static int sdma_v4_0_sw_init(void *handle)
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                                     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
 
@@ -1987,7 +1987,7 @@ static int sdma_v4_0_sw_init(void *handle)
                        r = amdgpu_ring_init(adev, ring, 1024,
                                             &adev->sdma.trap_irq,
                                             AMDGPU_SDMA_IRQ_INSTANCE0 + i,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index d345e324837ddf2152757e7119cf563882770ed6..920fc6d4a1273b2470c9fbe071e10385dbe3183b 100644 (file)
@@ -1273,12 +1273,10 @@ static int sdma_v5_0_sw_init(void *handle)
                        : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
 
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index b39e7db0e299ef3954cd0fa9aacf2e3d65558729..93f826a7d3f038ab9c5e9b99dd0138d8606b6bd2 100644 (file)
@@ -1283,10 +1283,9 @@ static int sdma_v5_2_sw_init(void *handle)
                        (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
 
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                                     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 488497ad5e0ccace84b3cbe7e857b07d400f890e..cb703e307238d9e7d1f0babdede723906769e993 100644 (file)
@@ -507,10 +507,9 @@ static int si_dma_sw_init(void *handle)
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 3808402cd964ec2a3a1bdd090c54cbb29a29dea3..5c5eb3aed1b3c7762e3f95252faf1f3a0f38bd00 100644 (file)
@@ -76,7 +76,6 @@
 #include "smuio_v13_0.h"
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"
-#include "amdgpu_smu.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
 #include <uapi/linux/kfd_ioctl.h>
@@ -1495,8 +1494,8 @@ static int soc15_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
-                       AMD_CG_SUPPORT_IH_CG;
-                       /*AMD_CG_SUPPORT_VCN_MGCG |AMD_CG_SUPPORT_JPEG_MGCG;*/
+                       AMD_CG_SUPPORT_IH_CG |
+                       AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
@@ -1524,8 +1523,9 @@ static int soc15_common_late_init(void *handle)
        if (adev->hdp.funcs->reset_ras_error_count)
                adev->hdp.funcs->reset_ras_error_count(adev);
 
-       if (adev->nbio.funcs->ras_late_init)
-               r = adev->nbio.funcs->ras_late_init(adev);
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->ras_late_init)
+               r = adev->nbio.ras_funcs->ras_late_init(adev);
 
        return r;
 }
@@ -1546,7 +1546,9 @@ static int soc15_common_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_nbio_ras_fini(adev);
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->ras_fini)
+               adev->nbio.ras_funcs->ras_fini(adev);
        adev->df.funcs->sw_fini(adev);
        return 0;
 }
@@ -1610,9 +1612,11 @@ static int soc15_common_hw_fini(void *handle)
 
        if (adev->nbio.ras_if &&
            amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
-               if (adev->nbio.funcs->init_ras_controller_interrupt)
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->init_ras_controller_interrupt)
                        amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
-               if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
                        amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
        }
 
index 8cdf5d1685cbbb789287fd17d37cacd0cd1120b5..14bd794bbea69acc99e059267e4ceb6e97f00bab 100644 (file)
 })
 
 #define WREG32_RLC(reg, value) \
-       do {                                                    \
-               if (amdgpu_sriov_fullaccess(adev)) {    \
-                       uint32_t i = 0; \
-                       uint32_t retries = 50000;       \
-                       uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0;   \
-                       uint32_t r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1;   \
-                       uint32_t spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT;  \
-                       WREG32(r0, value);      \
-                       WREG32(r1, (reg | 0x80000000)); \
-                       WREG32(spare_int, 0x1); \
-                       for (i = 0; i < retries; i++) { \
-                               u32 tmp = RREG32(r1);   \
-                               if (!(tmp & 0x80000000))        \
-                                       break;  \
-                               udelay(10);     \
-                       }       \
-                       if (i >= retries)       \
-                               pr_err("timeout: rlcg program reg:0x%05x failed !\n", reg);     \
-               } else {        \
-                       WREG32(reg, value); \
-               }       \
+       do { \
+               if (adev->gfx.rlc.funcs->rlcg_wreg) \
+                       adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
+               else \
+                       WREG32(reg, value);     \
        } while (0)
 
 #define WREG32_RLC_EX(prefix, reg, value) \
        } while (0)
 
 #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
-       do {                                                    \
-               uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
-               if (amdgpu_sriov_fullaccess(adev)) {    \
-                       uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2;   \
-                       uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3;   \
-                       uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;   \
-                       uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;   \
-                       if (target_reg == grbm_cntl) \
-                               WREG32(r2, value);      \
-                       else if (target_reg == grbm_idx) \
-                               WREG32(r3, value);      \
-                       WREG32(target_reg, value);      \
-               } else {        \
-                       WREG32(target_reg, value); \
-               }       \
+       WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+
+#define RREG32_RLC(reg) \
+       (adev->gfx.rlc.funcs->rlcg_rreg ? \
+               adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
+
+#define WREG32_RLC_NO_KIQ(reg, value) \
+       do { \
+               if (adev->gfx.rlc.funcs->rlcg_wreg) \
+                       adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
+               else \
+                       WREG32_NO_KIQ(reg, value);      \
        } while (0)
 
+#define RREG32_RLC_NO_KIQ(reg) \
+       (adev->gfx.rlc.funcs->rlcg_rreg ? \
+               adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
+
 #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
        do {                                                    \
                uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
                }       \
        } while (0)
 
+#define RREG32_SOC15_RLC(ip, inst, reg) \
+       RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
 #define WREG32_SOC15_RLC(ip, inst, reg, value) \
        do {                                                    \
-                       uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
-                       WREG32_RLC(target_reg, value); \
+               uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
+               WREG32_RLC(target_reg, value); \
        } while (0)
 
 #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
        } while (0)
 
 #define WREG32_FIELD15_RLC(ip, idx, reg, field, val)   \
-    WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
-    (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
-    & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+       WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+       (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+       & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
 
 #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
-    WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+       WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+
+#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
+       RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
 
 #endif
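The RLC register macros above now route through per-ASIC callbacks instead of open-coding the SR-IOV scratch-register handshake. A sketch of the opt-in, with the callback names taken from the macros and the rest of the amdgpu_rlc_funcs layout assumed:

    /* Hypothetical per-ASIC table: when rlcg_wreg/rlcg_rreg are NULL the
     * macros fall back to plain WREG32()/RREG32(). */
    static const struct amdgpu_rlc_funcs gfx_v9_x_rlc_funcs = {
            .rlcg_wreg = gfx_v9_x_rlcg_wreg, /* hypothetical helper */
            .rlcg_rreg = gfx_v9_x_rlcg_rreg, /* hypothetical helper */
    };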
index 96d7769609f4ada7a65b5abdabae96772165524d..20b44983ac945fbbca71b80b97bbb1f551d61bc3 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include "umc_v6_1.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
 #include "amdgpu.h"
 
 #include "rsmu/rsmu_0_0_2_offset.h"
@@ -464,9 +465,10 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
                umc_v6_1_enable_umc_index_mode(adev);
 }
 
-const struct amdgpu_umc_funcs umc_v6_1_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
        .err_cnt_init = umc_v6_1_err_cnt_init,
        .ras_late_init = amdgpu_umc_ras_late_init,
+       .ras_fini = amdgpu_umc_ras_fini,
        .query_ras_error_count = umc_v6_1_query_ras_error_count,
        .query_ras_error_address = umc_v6_1_query_ras_error_address,
 };
index 0ce1d323cfddfaae4d7ad92d837320b24c879143..5dc36c730bb2a25d635042935f044a797b4799ee 100644 (file)
@@ -45,7 +45,7 @@
 /* umc ce count initial value */
 #define UMC_V6_1_CE_CNT_INIT   (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_funcs umc_v6_1_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
 extern const uint32_t
        umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
new file mode 100644 (file)
index 0000000..3a8f787
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
+                                             uint32_t umc_inst,
+                                             uint32_t ch_inst)
+{
+       return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
+}
+
+static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
+                                                  uint32_t umc_reg_offset,
+                                                  unsigned long *error_count)
+{
+       uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+       uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+       uint64_t mc_umc_status;
+       uint32_t mc_umc_status_addr;
+
+       /* UMC 6_7 registers */
+       ecc_err_cnt_sel_addr =
+               SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel);
+       ecc_err_cnt_addr =
+               SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt);
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+       /* select the lower chip and check the error count */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 0);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+       ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+       *error_count +=
+               (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+                UMC_V6_7_CE_CNT_INIT);
+
+       /* select the higher chip and check the error count */
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 1);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+       ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+       *error_count +=
+               (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+                UMC_V6_7_CE_CNT_INIT);
+
+       /* check for SRAM correctable error:
+        * MCUMC_STATUS is a 64-bit register */
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+               *error_count += 1;
+}
+
+static void umc_v6_7_query_uncorrectable_error_count(struct amdgpu_device *adev,
+                                                    uint32_t umc_reg_offset,
+                                                    unsigned long *error_count)
+{
+       uint64_t mc_umc_status;
+       uint32_t mc_umc_status_addr;
+
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+       /* check the MCUMC_STATUS */
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+       if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+               *error_count += 1;
+}
+
+static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
+                                                  uint32_t umc_reg_offset)
+{
+       uint32_t ecc_err_cnt_addr;
+       uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+       ecc_err_cnt_sel_addr =
+               SOC15_REG_OFFSET(UMC, 0,
+                               regUMCCH0_0_EccErrCntSel);
+       ecc_err_cnt_addr =
+               SOC15_REG_OFFSET(UMC, 0,
+                               regUMCCH0_0_EccErrCnt);
+
+       /* select the lower chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                      umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 0);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear lower chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_7_CE_CNT_INIT);
+
+       /* select the higher chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                       umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 1);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear higher chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_7_CE_CNT_INIT);
+}
+
+static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
+{
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+                                                        umc_inst,
+                                                        ch_inst);
+
+               umc_v6_7_reset_error_count_per_channel(adev,
+                                                      umc_reg_offset);
+       }
+}
+
+static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
+                                          void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+
+       /* TODO: the driver needs to toggle the DF C-state to ensure
+        * safe access to UMC registers; the protection will be added
+        * later. */
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+                                                        umc_inst,
+                                                        ch_inst);
+               umc_v6_7_query_correctable_error_count(adev,
+                                                      umc_reg_offset,
+                                                      &(err_data->ce_count));
+               umc_v6_7_query_uncorrectable_error_count(adev,
+                                                        umc_reg_offset,
+                                                        &(err_data->ue_count));
+       }
+
+       umc_v6_7_reset_error_count(adev);
+}
+
+static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
+                                        struct ras_err_data *err_data,
+                                        uint32_t umc_reg_offset,
+                                        uint32_t ch_inst,
+                                        uint32_t umc_inst)
+{
+       uint32_t mc_umc_status_addr;
+       uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+       struct eeprom_table_record *err_rec;
+       uint32_t channel_index;
+
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+       mc_umc_addrt0 =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+       if (mc_umc_status == 0)
+               return;
+
+       if (!err_data->err_addr) {
+               /* clear umc status */
+               WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+               return;
+       }
+
+       err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+       channel_index =
+               adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+       /* calculate error address if ue/ce error is detected */
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+               err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+               err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+               /* translate the umc channel address to a soc physical address;
+                * it is assembled from the 3 parts below */
+               retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+                               ADDR_OF_256B_BLOCK(channel_index) |
+                               OFFSET_IN_256B_BLOCK(err_addr);
+
+               /* only ue error information is saved for now; ce is skipped */
+               if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+                               == 1) {
+                       err_rec->address = err_addr;
+                       /* page frame address is saved */
+                       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+                       err_rec->ts = (uint64_t)ktime_get_real_seconds();
+                       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+                       err_rec->cu = 0;
+                       err_rec->mem_channel = channel_index;
+                       err_rec->mcumc_id = umc_inst;
+
+                       err_data->err_addr_cnt++;
+               }
+       }
+
+       /* clear umc status */
+       WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
+                                            void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+
+       /* TODO: the driver needs to toggle the DF C-state to ensure
+        * safe access to UMC registers; the protection will be added
+        * once the firmware interface is ready. */
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+                                                        umc_inst,
+                                                        ch_inst);
+               umc_v6_7_query_error_address(adev,
+                                            err_data,
+                                            umc_reg_offset,
+                                            ch_inst,
+                                            umc_inst);
+       }
+}
+
+const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
+       .ras_late_init = amdgpu_umc_ras_late_init,
+       .ras_fini = amdgpu_umc_ras_fini,
+       .query_ras_error_count = umc_v6_7_query_ras_error_count,
+       .query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
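Nothing in this excerpt wires umc_v6_7_ras_funcs into a GMC; presumably it follows the same switch pattern as the v6.1 and v8.7 tables earlier in this diff. A hypothetical sketch for gmc_v9_0_set_umc_funcs():

    case CHIP_ALDEBARAN: /* hypothetical wiring, not part of this patch */
            adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
            break;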
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
new file mode 100644 (file)
index 0000000..4eb85f2
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_7_H__
+#define __UMC_V6_7_H__
+
+/* EccErrCnt max value */
+#define UMC_V6_7_CE_CNT_MAX            0xffff
+/* umc ce interrupt threshold */
+#define UMC_V6_7_CE_INT_THRESHOLD      0xffff
+/* umc ce count initial value */
+#define UMC_V6_7_CE_CNT_INIT   (UMC_V6_7_CE_CNT_MAX - UMC_V6_7_CE_INT_THRESHOLD)
+
+#define UMC_V6_7_INST_DIST     0x40000
+
+extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+
+#endif
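The CE counter constants repeat the convention of UMC v6.1/v8.7: the hardware counter is presumably pre-loaded with CE_CNT_INIT = CE_CNT_MAX - CE_INT_THRESHOLD so that it saturates (and can raise an interrupt) after CE_INT_THRESHOLD new correctable errors. That is why the query path in umc_v6_7.c reports a delta:

    /* New CEs since init = raw counter value minus the preload. */
    *error_count += REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
                    UMC_V6_7_CE_CNT_INIT;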
index a064c097690c7d9ae060e126bc9fa9a382817d98..89d20adfa001a1ad3007ca3f6e3932c0cbbfe33b 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include "umc_v8_7.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
 #include "amdgpu.h"
 
 #include "rsmu/rsmu_0_0_2_offset.h"
@@ -323,9 +324,10 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_umc_funcs umc_v8_7_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
        .err_cnt_init = umc_v8_7_err_cnt_init,
        .ras_late_init = amdgpu_umc_ras_late_init,
+       .ras_fini = amdgpu_umc_ras_fini,
        .query_ras_error_count = umc_v8_7_query_ras_error_count,
        .query_ras_error_address = umc_v8_7_query_ras_error_address,
 };
index d4d0468e3df50f20416c75470d39df57f248f421..37e6dc7c28e0d963f035a21d649a20824c7415d8 100644 (file)
@@ -44,7 +44,7 @@
 /* umc ce count initial value */
 #define UMC_V8_7_CE_CNT_INIT   (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_funcs umc_v8_7_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
 extern const uint32_t
        umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
 
index 10ecae257b18a17c55e64f6d1b5dc8e806e5fb24..284447d7a579523118923fb8396c31483f094e5e 100644 (file)
@@ -562,7 +562,7 @@ static int uvd_v3_1_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                        AMDGPU_RING_PRIO_DEFAULT);
+                        AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index a70d2a0de316baf0a5b43fe711fc2a73b34e9933..a301518e4957ec10f40232060e114ffef76c05fb 100644 (file)
@@ -119,7 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index f3b0a927101b7449cdd14258ee659a257cbe8975..a4d5bd21c83c714996af6da23ba186abd83302ef 100644 (file)
@@ -117,7 +117,7 @@ static int uvd_v5_0_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 760859880c1edde68a3e18032c38c99bc598e705..2bab9c77952fd73163a6df839d441d0827b0de05 100644 (file)
@@ -420,7 +420,7 @@ static int uvd_v6_0_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -434,7 +434,7 @@ static int uvd_v6_0_sw_init(void *handle)
                        sprintf(ring->name, "uvd_enc%d", i);
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->uvd.inst->irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index 7cd67cb2ac5f114e945b16041e79fbef6c5ab465..0cd98fcb1f9fcbcdc8c5b2bceb224a754988bd75 100644 (file)
@@ -454,7 +454,7 @@ static int uvd_v7_0_sw_init(void *handle)
                        sprintf(ring->name, "uvd_%d", ring->me);
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->uvd.inst[j].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
@@ -475,7 +475,7 @@ static int uvd_v7_0_sw_init(void *handle)
                        }
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->uvd.inst[j].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index 0e2945baf0f157430452faccb5a184d7d7505b79..c7d28c169be56f4759fa242e213146dbaee1b868 100644 (file)
@@ -433,9 +433,8 @@ static int vce_v2_0_sw_init(void *handle)
        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
-               r = amdgpu_ring_init(adev, ring, 512,
-                                    &adev->vce.irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 6d9108fa22e0f21e08ffee13b2f78f1de1f069ec..3b82fb289ef6a3afa69a73c230b16377d8631fea 100644 (file)
@@ -443,7 +443,7 @@ static int vce_v3_0_sw_init(void *handle)
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 37fa163393fd4dacd949a9298c17b3c2016769c0..8e238dea7bef1976747537ada6877bc3611162fd 100644 (file)
@@ -477,7 +477,7 @@ static int vce_v4_0_sw_init(void *handle)
                                ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
                }
                r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 6117931fa8d798275219aceea41eee0ed15db652..51a773a37a354ee8d73a126e6326bf9fe15c65c5 100644 (file)
@@ -129,7 +129,7 @@ static int vcn_v1_0_sw_init(void *handle)
        ring = &adev->vcn.inst->ring_dec;
        sprintf(ring->name, "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -148,7 +148,7 @@ static int vcn_v1_0_sw_init(void *handle)
                ring = &adev->vcn.inst->ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index d63198c945bf06f526443989e07a8363fd06423b..116b9643d5bab315fd890dad877eaf213dbe4c24 100644 (file)
@@ -136,7 +136,7 @@ static int vcn_v2_0_sw_init(void *handle)
 
        sprintf(ring->name, "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -167,7 +167,7 @@ static int vcn_v2_0_sw_init(void *handle)
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
                sprintf(ring->name, "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 87ec883f7e0674aca6b6b8825857075e064ea25e..948813d7caa024221679ded544247602bbf72b88 100644 (file)
@@ -189,7 +189,7 @@ static int vcn_v2_5_sw_init(void *handle)
                                (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
                sprintf(ring->name, "vcn_dec_%d", j);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
-                                    0, AMDGPU_RING_PRIO_DEFAULT);
+                                    0, AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
 
@@ -203,7 +203,7 @@ static int vcn_v2_5_sw_init(void *handle)
                        sprintf(ring->name, "vcn_enc_%d.%d", j, i);
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->vcn.inst[j].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index b61d1ba1aa9db00053781d545a8a291292847097..3f15bf34123a95663326dfd2cfa1e5133fdb75e7 100644 (file)
@@ -50,6 +50,9 @@
 #define VCN_INSTANCES_SIENNA_CICHLID                           2
 #define DEC_SW_RING_ENABLED                                    FALSE
 
+#define RDECODE_MSG_CREATE                                     0x00000000
+#define RDECODE_MESSAGE_CREATE                                 0x00000001
+
 static int amdgpu_ih_clientid_vcns[] = {
        SOC15_IH_CLIENTID_VCN,
        SOC15_IH_CLIENTID_VCN1
@@ -171,6 +174,7 @@ static int vcn_v3_0_sw_init(void *handle)
 
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                volatile struct amdgpu_fw_shared *fw_shared;
+
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
@@ -198,6 +202,8 @@ static int vcn_v3_0_sw_init(void *handle)
                if (r)
                        return r;
 
+               atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
                ring = &adev->vcn.inst[i].ring_dec;
                ring->use_doorbell = true;
                if (amdgpu_sriov_vf(adev)) {
@@ -205,11 +211,10 @@ static int vcn_v3_0_sw_init(void *handle)
                } else {
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
                }
-               if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
-                       ring->no_scheduler = true;
                sprintf(ring->name, "vcn_dec_%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT,
+                                    &adev->vcn.inst[i].sched_score);
                if (r)
                        return r;
 
@@ -227,11 +232,10 @@ static int vcn_v3_0_sw_init(void *handle)
                        } else {
                                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
                        }
-                       if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
-                               ring->no_scheduler = true;
                        sprintf(ring->name, "vcn_enc_%d.%d", i, j);
                        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT,
+                                            &adev->vcn.inst[i].sched_score);
                        if (r)
                                return r;
                }
@@ -1844,6 +1848,132 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+{
+       struct drm_gpu_scheduler **scheds;
+
+       /* The create msg must be in the first IB submitted */
+       if (atomic_read(&p->entity->fence_seq))
+               return -EINVAL;
+
+       scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+               [AMDGPU_RING_PRIO_DEFAULT].sched;
+       drm_sched_entity_modify_sched(p->entity, scheds, 1);
+       return 0;
+}
+
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+{
+       struct ttm_operation_ctx ctx = { false, false };
+       struct amdgpu_bo_va_mapping *map;
+       uint32_t *msg, num_buffers;
+       struct amdgpu_bo *bo;
+       uint64_t start, end;
+       unsigned int i;
+       void *ptr;
+       int r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
+       if (r) {
+               DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+               return r;
+       }
+
+       start = map->start * AMDGPU_GPU_PAGE_SIZE;
+       end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
+       if (addr & 0x7) {
+               DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+               return -EINVAL;
+       }
+
+       bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (r) {
+               DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
+               return r;
+       }
+
+       r = amdgpu_bo_kmap(bo, &ptr);
+       if (r) {
+               DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
+               return r;
+       }
+
+       msg = ptr + addr - start;
+
+       /* Check length */
+       if (msg[1] > end - addr) {
+               r = -EINVAL;
+               goto out;
+       }
+
+       if (msg[3] != RDECODE_MSG_CREATE)
+               goto out;
+
+       num_buffers = msg[2];
+       for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
+               uint32_t offset, size, *create;
+
+               if (msg[0] != RDECODE_MESSAGE_CREATE)
+                       continue;
+
+               offset = msg[1];
+               size = msg[2];
+
+               if (offset + size > end) {
+                       r = -EINVAL;
+                       goto out;
+               }
+
+               create = ptr + addr + offset - start;
+
+               /* H264, HEVC and VP9 can run on any instance */
+               if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+                       continue;
+
+               r = vcn_v3_0_limit_sched(p);
+               if (r)
+                       goto out;
+       }
+
+out:
+       amdgpu_bo_kunmap(bo);
+       return r;
+}
+
+static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+                                          uint32_t ib_idx)
+{
+       struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+       struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+       uint32_t msg_lo = 0, msg_hi = 0;
+       unsigned int i;
+       int r;
+
+       /* The first instance can decode anything */
+       if (!ring->me)
+               return 0;
+
+       for (i = 0; i < ib->length_dw; i += 2) {
+               uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+               uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+
+               if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+                       msg_lo = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+                       msg_hi = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+                          val == 0) {
+                       r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+                       if (r)
+                               return r;
+               }
+       }
+       return 0;
+}
+
 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
@@ -1851,6 +1981,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
        .get_rptr = vcn_v3_0_dec_ring_get_rptr,
        .get_wptr = vcn_v3_0_dec_ring_get_wptr,
        .set_wptr = vcn_v3_0_dec_ring_set_wptr,
+       .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
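
Taken together, the VCN 3.0 hunks above add load balancing with a codec
restriction: vcn_v3_0_ring_patch_cs_in_place() walks the IB as (register,
value) pairs, latches writes to the data0/data1 registers, and when the
decode command fires it reassembles the 64-bit message address so
vcn_v3_0_dec_msg() can inspect the create message; only codecs other than
H.264, HEVC and VP9 get pinned back to the first instance through
vcn_v3_0_limit_sched(). A small user-space sketch of the address reassembly,
with made-up register numbers standing in for the real
internal.data0/data1/cmd offsets:

#include <stdint.h>
#include <stdio.h>

#define REG_DATA0 0x10	/* low 32 bits of the message address */
#define REG_DATA1 0x11	/* high 32 bits */
#define REG_CMD   0x12	/* command register; 0 = decode message ready */

static void scan_ib(const uint32_t *ib, unsigned int len_dw)
{
	uint32_t lo = 0, hi = 0;

	for (unsigned int i = 0; i + 1 < len_dw; i += 2) {
		uint32_t reg = ib[i], val = ib[i + 1];

		if (reg == REG_DATA0)
			lo = val;
		else if (reg == REG_DATA1)
			hi = val;
		else if (reg == REG_CMD && val == 0)
			printf("decode msg at 0x%016llx\n",
			       (unsigned long long)((uint64_t)hi << 32 | lo));
	}
}

int main(void)
{
	const uint32_t ib[] = { REG_DATA0, 0xdeadbeef,
				REG_DATA1, 0x1,
				REG_CMD, 0 };

	scan_ib(ib, 6);
	return 0;
}
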
index 6c3cb3513b98c4813c74874324f8ac28b595e0d6..8a122b413bf5d929ff1f7dadb2842e5fe0c90c5b 100644 (file)
@@ -264,10 +264,10 @@ static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
 {
        uint32_t tmp;
 
-       /* vega20 ih reroute will go through psp
-        * this function is only used for arcturus
+       /* vega20 ih reroute will go through psp; this
+        * function is only used for arcturus and newer asics
         */
-       if (adev->asic_type == CHIP_ARCTURUS) {
+       if (adev->asic_type >= CHIP_ARCTURUS) {
                /* Reroute to IH ring 1 for VMC */
                WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
                tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
index 6802c616e10e40162c994f612e8959a9d3fea08a..43de260b2230878346215a744877ac3580472394 100644 (file)
@@ -870,52 +870,47 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 {
        struct kfd_ioctl_get_process_apertures_args *args = data;
        struct kfd_process_device_apertures *pAperture;
-       struct kfd_process_device *pdd;
+       int i;
 
        dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 
        args->num_of_nodes = 0;
 
        mutex_lock(&p->mutex);
+       /* Run over all pdd of the process */
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               pAperture =
+                       &args->process_apertures[args->num_of_nodes];
+               pAperture->gpu_id = pdd->dev->id;
+               pAperture->lds_base = pdd->lds_base;
+               pAperture->lds_limit = pdd->lds_limit;
+               pAperture->gpuvm_base = pdd->gpuvm_base;
+               pAperture->gpuvm_limit = pdd->gpuvm_limit;
+               pAperture->scratch_base = pdd->scratch_base;
+               pAperture->scratch_limit = pdd->scratch_limit;
 
-       /*if the process-device list isn't empty*/
-       if (kfd_has_process_device_data(p)) {
-               /* Run over all pdd of the process */
-               pdd = kfd_get_first_process_device_data(p);
-               do {
-                       pAperture =
-                               &args->process_apertures[args->num_of_nodes];
-                       pAperture->gpu_id = pdd->dev->id;
-                       pAperture->lds_base = pdd->lds_base;
-                       pAperture->lds_limit = pdd->lds_limit;
-                       pAperture->gpuvm_base = pdd->gpuvm_base;
-                       pAperture->gpuvm_limit = pdd->gpuvm_limit;
-                       pAperture->scratch_base = pdd->scratch_base;
-                       pAperture->scratch_limit = pdd->scratch_limit;
-
-                       dev_dbg(kfd_device,
-                               "node id %u\n", args->num_of_nodes);
-                       dev_dbg(kfd_device,
-                               "gpu id %u\n", pdd->dev->id);
-                       dev_dbg(kfd_device,
-                               "lds_base %llX\n", pdd->lds_base);
-                       dev_dbg(kfd_device,
-                               "lds_limit %llX\n", pdd->lds_limit);
-                       dev_dbg(kfd_device,
-                               "gpuvm_base %llX\n", pdd->gpuvm_base);
-                       dev_dbg(kfd_device,
-                               "gpuvm_limit %llX\n", pdd->gpuvm_limit);
-                       dev_dbg(kfd_device,
-                               "scratch_base %llX\n", pdd->scratch_base);
-                       dev_dbg(kfd_device,
-                               "scratch_limit %llX\n", pdd->scratch_limit);
-
-                       args->num_of_nodes++;
-
-                       pdd = kfd_get_next_process_device_data(p, pdd);
-               } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
-       }
+               dev_dbg(kfd_device,
+                       "node id %u\n", args->num_of_nodes);
+               dev_dbg(kfd_device,
+                       "gpu id %u\n", pdd->dev->id);
+               dev_dbg(kfd_device,
+                       "lds_base %llX\n", pdd->lds_base);
+               dev_dbg(kfd_device,
+                       "lds_limit %llX\n", pdd->lds_limit);
+               dev_dbg(kfd_device,
+                       "gpuvm_base %llX\n", pdd->gpuvm_base);
+               dev_dbg(kfd_device,
+                       "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+               dev_dbg(kfd_device,
+                       "scratch_base %llX\n", pdd->scratch_base);
+               dev_dbg(kfd_device,
+                       "scratch_limit %llX\n", pdd->scratch_limit);
 
+               if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
+                       break;
+       }
        mutex_unlock(&p->mutex);
 
        return 0;
@@ -926,9 +921,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 {
        struct kfd_ioctl_get_process_apertures_new_args *args = data;
        struct kfd_process_device_apertures *pa;
-       struct kfd_process_device *pdd;
-       uint32_t nodes = 0;
        int ret;
+       int i;
 
        dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 
@@ -937,17 +931,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
                 * sufficient memory
                 */
                mutex_lock(&p->mutex);
-
-               if (!kfd_has_process_device_data(p))
-                       goto out_unlock;
-
-               /* Run over all pdd of the process */
-               pdd = kfd_get_first_process_device_data(p);
-               do {
-                       args->num_of_nodes++;
-                       pdd = kfd_get_next_process_device_data(p, pdd);
-               } while (pdd);
-
+               args->num_of_nodes = p->n_pdds;
                goto out_unlock;
        }
 
@@ -962,22 +946,23 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 
        mutex_lock(&p->mutex);
 
-       if (!kfd_has_process_device_data(p)) {
+       if (!p->n_pdds) {
                args->num_of_nodes = 0;
                kfree(pa);
                goto out_unlock;
        }
 
        /* Run over all pdd of the process */
-       pdd = kfd_get_first_process_device_data(p);
-       do {
-               pa[nodes].gpu_id = pdd->dev->id;
-               pa[nodes].lds_base = pdd->lds_base;
-               pa[nodes].lds_limit = pdd->lds_limit;
-               pa[nodes].gpuvm_base = pdd->gpuvm_base;
-               pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
-               pa[nodes].scratch_base = pdd->scratch_base;
-               pa[nodes].scratch_limit = pdd->scratch_limit;
+       for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               pa[i].gpu_id = pdd->dev->id;
+               pa[i].lds_base = pdd->lds_base;
+               pa[i].lds_limit = pdd->lds_limit;
+               pa[i].gpuvm_base = pdd->gpuvm_base;
+               pa[i].gpuvm_limit = pdd->gpuvm_limit;
+               pa[i].scratch_base = pdd->scratch_base;
+               pa[i].scratch_limit = pdd->scratch_limit;
 
                dev_dbg(kfd_device,
                        "gpu id %u\n", pdd->dev->id);
@@ -993,17 +978,14 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
                        "scratch_base %llX\n", pdd->scratch_base);
                dev_dbg(kfd_device,
                        "scratch_limit %llX\n", pdd->scratch_limit);
-               nodes++;
-
-               pdd = kfd_get_next_process_device_data(p, pdd);
-       } while (pdd && (nodes < args->num_of_nodes));
+       }
        mutex_unlock(&p->mutex);
 
-       args->num_of_nodes = nodes;
+       args->num_of_nodes = i;
        ret = copy_to_user(
                        (void __user *)args->kfd_process_device_apertures_ptr,
                        pa,
-                       (nodes * sizeof(struct kfd_process_device_apertures)));
+                       (i * sizeof(struct kfd_process_device_apertures)));
        kfree(pa);
        return ret ? -EFAULT : 0;
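
Both rewritten ioctls bound their copy by clamping to the smaller of the
process's pdd count and the caller-supplied capacity, then report back how
many slots were actually filled. A self-contained sketch of that
clamp-and-report idiom (all names are illustrative):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned int fill_nodes(int *out, unsigned int out_cap,
			       const int *src, unsigned int n_src)
{
	unsigned int i, n = MIN(out_cap, n_src);

	for (i = 0; i < n; i++)
		out[i] = src[i];
	return i;	/* plays the role of args->num_of_nodes */
}

int main(void)
{
	int src[4] = { 10, 11, 12, 13 }, out[2];

	printf("copied %u nodes\n", fill_nodes(out, 2, src, 4));
	return 0;
}
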
 
index b258a3dae767f83ee1cd07c1c189ca82c371ba1f..159add0f5aaae26fe0a7c8824d93f0a1127a5808 100644 (file)
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
        /* Wait till CP writes sync code: */
        status = amdkfd_fence_wait_timeout(
-                       (unsigned int *) rm_state,
+                       rm_state,
                        QUEUESTATE__ACTIVE, 1500);
 
        kfd_gtt_sa_free(dbgdev->dev, mem_obj);
index 511712c2e382dd3cb5fc95445d8a848190af6fcd..673d5e34f213c3ebf840ae09812cf41c7fe1daf6 100644 (file)
@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
 
        return single_open(file, show, NULL);
 }
+static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
+{
+       seq_printf(m, "echo gpu_id > hang_hws\n");
+       return 0;
+}
 
 static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
        const char __user *user_buf, size_t size, loff_t *ppos)
@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
        debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
                            kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
        debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
-                           NULL, &kfd_debugfs_hang_hws_fops);
+                           kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
 }
 
 void kfd_debugfs_fini(void)
index f860cd7059612af25df5b772a9a93ff0e38859f3..357b9bf62a1cfc272dcc963bf9f3cc4211c0155c 100644 (file)
@@ -1322,7 +1322,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
 
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
 {
-       if (kfd)
+       if (kfd && kfd->init_complete)
                kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
 }
 
index 965f9f230045c7603b72d988a7a2870265f8f4df..d3eaa1549bd784f0f9486e2826c502d6906e49d5 100644 (file)
@@ -1180,7 +1180,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
        if (retval)
                goto fail_allocate_vidmem;
 
-       dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+       dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
        init_interrupts(dqm);
@@ -1353,8 +1353,8 @@ out:
        return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                               unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                               uint64_t fence_value,
                                unsigned int timeout_ms)
 {
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
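
Widening fence_addr and fence_value to 64 bits keeps the CP-written fence
from wrapping; the wait itself remains a timeout-bounded poll. A hedged
user-space model of that shape (the kernel version polls with jiffies and
schedule(), not clock_gettime()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool fence_wait_timeout(_Atomic uint64_t *fence_addr,
			       uint64_t fence_value, unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	while (atomic_load(fence_addr) != fence_value) {
		if (now_ms() >= deadline)
			return false;	/* timed out; caller escalates */
	}
	return true;
}

int main(void)
{
	_Atomic uint64_t fence = 42;

	return fence_wait_timeout(&fence, 42, 10) ? 0 : 1;
}
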
index dd0d8834b4324399f4d18d779dcb94b44946d522..71e2fde56b2b7a2d4c745b6bfaae7d49818c3c77 100644 (file)
@@ -187,7 +187,7 @@ struct device_queue_manager {
        uint16_t                vmid_pasid[VMID_NUM];
        uint64_t                pipelines_addr;
        uint64_t                fence_gpu_addr;
-       unsigned int            *fence_addr;
+       uint64_t                *fence_addr;
        struct kfd_mem_obj      *fence_mem;
        bool                    active_runlist;
        int                     sched_policy;
index 9318936aa80549d9b8c489cc6755a1ad51edacb5..5a1f2433632b2afbd4cc3aa7887fbbdde5a0e903 100644 (file)
@@ -135,11 +135,11 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
  */
 void kfd_iommu_unbind_process(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               if (pdd->bound == PDD_BOUND)
-                       amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+       for (i = 0; i < p->n_pdds; i++)
+               if (p->pdds[i]->bound == PDD_BOUND)
+                       amd_iommu_unbind_pasid(p->pdds[i]->dev->pdev, p->pasid);
 }
 
 /* Callback for process shutdown invoked by the IOMMU driver */
index d903f694acbaddc6aaccdb7e33468702cfb0b610..e840dd581719c06967077410cc1746e5aae86149 100644 (file)
@@ -348,7 +348,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                       uint32_t fence_value)
+                       uint64_t fence_value)
 {
        uint32_t *buffer, size;
        int retval = 0;
index dfaf771a42e66e47800f0cbb55ea4dca460a09a8..e3ba0cd3b6fa717966e179acf6b2e36e621ddc21 100644 (file)
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index a852e0d7d804fdba2053808ff6520d860deb4d6f..08442e7d9944074d8da030261610a24d01f544c2 100644 (file)
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index d8c8b5ff449aaf6853d67ba001f8e4cb43a5481e..0b6595f7acdaae36be4e05539155a17a5fd69cdd 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 
 #include "amd_shared.h"
+#include "amdgpu.h"
 
 #define KFD_MAX_RING_ENTRY_SIZE        8
 
@@ -649,12 +650,6 @@ enum kfd_pdd_bound {
 
 /* Data that is per-process-per device. */
 struct kfd_process_device {
-       /*
-        * List of all per-device data for a process.
-        * Starts from kfd_process.per_device_data.
-        */
-       struct list_head per_device_list;
-
        /* The device that owns this data. */
        struct kfd_dev *dev;
 
@@ -771,10 +766,11 @@ struct kfd_process {
        u32 pasid;
 
        /*
-        * List of kfd_process_device structures,
+        * Array of kfd_process_device pointers,
         * one for each device the process is using.
         */
-       struct list_head per_device_data;
+       struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
+       uint32_t n_pdds;
 
        struct process_queue_manager pqm;
 
@@ -872,14 +868,6 @@ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                        int handle);
 
-/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(
-                                                       struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(
-                                               struct kfd_process *p,
-                                               struct kfd_process_device *pdd);
-bool kfd_has_process_device_data(struct kfd_process *p);
-
 /* PASIDs */
 int kfd_pasid_init(void);
 void kfd_pasid_exit(void);
@@ -1012,8 +1000,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
                       u32 *ctl_stack_used_size,
                       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                             unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                             uint64_t fence_value,
                              unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1049,7 +1037,7 @@ struct packet_manager_funcs {
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value);
+                       uint64_t fence_address, uint64_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
        /* Packet sizes */
@@ -1071,7 +1059,7 @@ int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                               uint32_t fence_value);
+                               uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter mode,
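
The structural change in this header is the one the rest of the series leans
on: the per-process device list becomes a fixed pointer array plus a count,
giving O(1) indexed access and a hard bound of MAX_GPU_INSTANCE, which is why
the three iterator helpers can be deleted. A minimal model of the array idiom
and its insertion guard (the limit value here is a stand-in, not the driver's
real one):

#include <stdio.h>

#define MAX_GPU_INSTANCE 16	/* stand-in bound */

struct pdd { int gpu_id; };

struct process {
	struct pdd *pdds[MAX_GPU_INSTANCE];
	unsigned int n_pdds;
};

static int process_add_pdd(struct process *p, struct pdd *pdd)
{
	if (p->n_pdds >= MAX_GPU_INSTANCE)	/* mirrors the WARN_ON_ONCE guard */
		return -1;
	p->pdds[p->n_pdds++] = pdd;
	return 0;
}

int main(void)
{
	struct process p = { .n_pdds = 0 };
	struct pdd d = { .gpu_id = 7 };

	if (!process_add_pdd(&p, &d))
		for (unsigned int i = 0; i < p.n_pdds; i++)
			printf("pdd %u -> gpu %d\n", i, p.pdds[i]->gpu_id);
	return 0;
}
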
index f5237997fa186e3cada629c675b166539e61a885..d4241d29ea94fa4afbd706e546a92d97e5a06733 100644 (file)
@@ -505,7 +505,7 @@ static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
 static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
 {
        int ret = 0;
-       struct kfd_process_device *pdd;
+       int i;
        char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
 
        if (!p)
@@ -520,7 +520,8 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
         * - proc/<pid>/stats_<gpuid>/evicted_ms
         * - proc/<pid>/stats_<gpuid>/cu_occupancy
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
                struct kobject *kobj_stats;
 
                snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
@@ -571,7 +572,7 @@ err:
 static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
 {
        int ret = 0;
-       struct kfd_process_device *pdd;
+       int i;
 
        if (!p)
                return -EINVAL;
@@ -584,7 +585,9 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
         * - proc/<pid>/vram_<gpuid>
         * - proc/<pid>/sdma_<gpuid>
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
                snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
                         pdd->dev->id);
                ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
@@ -881,21 +884,23 @@ void kfd_unref_process(struct kfd_process *p)
        kref_put(&p->ref, kfd_process_ref_release);
 }
 
+
 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 {
        struct kfd_process *p = pdd->process;
        void *mem;
        int id;
+       int i;
 
        /*
         * Remove all handles from idr and release appropriate
         * local memory object
         */
        idr_for_each_entry(&pdd->alloc_idr, mem, id) {
-               struct kfd_process_device *peer_pdd;
 
-               list_for_each_entry(peer_pdd, &p->per_device_data,
-                                   per_device_list) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *peer_pdd = p->pdds[i];
+
                        if (!peer_pdd->vm)
                                continue;
                        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -909,18 +914,19 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 
 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               kfd_process_device_free_bos(pdd);
+       for (i = 0; i < p->n_pdds; i++)
+               kfd_process_device_free_bos(p->pdds[i]);
 }
 
 static void kfd_process_destroy_pdds(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd, *temp;
+       int i;
+
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
 
-       list_for_each_entry_safe(pdd, temp, &p->per_device_data,
-                                per_device_list) {
                pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
                                pdd->dev->id, p->pasid);
 
@@ -933,8 +939,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                        amdgpu_amdkfd_gpuvm_destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);
 
-               list_del(&pdd->per_device_list);
-
                if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
                        free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
                                get_order(KFD_CWSR_TBA_TMA_SIZE));
@@ -955,7 +959,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                }
 
                kfree(pdd);
+               p->pdds[i] = NULL;
        }
+       p->n_pdds = 0;
 }
 
 /* No process locking is needed in this function, because the process
@@ -967,7 +973,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 {
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);
-       struct kfd_process_device *pdd;
+       int i;
 
        /* Remove the procfs files */
        if (p->kobj) {
@@ -976,7 +982,9 @@ static void kfd_process_wq_release(struct work_struct *work)
                kobject_put(p->kobj_queues);
                p->kobj_queues = NULL;
 
-               list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *pdd = p->pdds[i];
+
                        sysfs_remove_file(p->kobj, &pdd->attr_vram);
                        sysfs_remove_file(p->kobj, &pdd->attr_sdma);
                        sysfs_remove_file(p->kobj, &pdd->attr_evict);
@@ -1036,7 +1044,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
 {
        struct kfd_process *p;
-       struct kfd_process_device *pdd = NULL;
+       int i;
 
        /*
         * The kfd_process structure can not be free because the
@@ -1060,8 +1068,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
         * pdd is in debug mode, we should first force unregistration,
         * then we will be able to destroy the queues
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
-               struct kfd_dev *dev = pdd->dev;
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_dev *dev = p->pdds[i]->dev;
 
                mutex_lock(kfd_get_dbgmgr_mutex());
                if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
@@ -1098,11 +1106,11 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 {
        unsigned long  offset;
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
-               struct kfd_dev *dev = pdd->dev;
-               struct qcm_process_device *qpd = &pdd->qpd;
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_dev *dev = p->pdds[i]->dev;
+               struct qcm_process_device *qpd = &p->pdds[i]->qpd;
 
                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;
@@ -1199,7 +1207,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        mutex_init(&process->mutex);
        process->mm = thread->mm;
        process->lead_thread = thread->group_leader;
-       INIT_LIST_HEAD(&process->per_device_data);
+       process->n_pdds = 0;
        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();
@@ -1290,11 +1298,11 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
 struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
 {
-       struct kfd_process_device *pdd = NULL;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               if (pdd->dev == dev)
-                       return pdd;
+       for (i = 0; i < p->n_pdds; i++)
+               if (p->pdds[i]->dev == dev)
+                       return p->pdds[i];
 
        return NULL;
 }
@@ -1304,6 +1312,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 {
        struct kfd_process_device *pdd = NULL;
 
+       if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
+               return NULL;
        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;
@@ -1332,7 +1342,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->vram_usage = 0;
        pdd->sdma_past_activity_counter = 0;
        atomic64_set(&pdd->evict_duration_counter, 0);
-       list_add(&pdd->per_device_list, &p->per_device_data);
+       p->pdds[p->n_pdds++] = pdd;
 
        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);
@@ -1464,28 +1474,6 @@ out:
        return ERR_PTR(err);
 }
 
-struct kfd_process_device *kfd_get_first_process_device_data(
-                                               struct kfd_process *p)
-{
-       return list_first_entry(&p->per_device_data,
-                               struct kfd_process_device,
-                               per_device_list);
-}
-
-struct kfd_process_device *kfd_get_next_process_device_data(
-                                               struct kfd_process *p,
-                                               struct kfd_process_device *pdd)
-{
-       if (list_is_last(&pdd->per_device_list, &p->per_device_data))
-               return NULL;
-       return list_next_entry(pdd, per_device_list);
-}
-
-bool kfd_has_process_device_data(struct kfd_process *p)
-{
-       return !(list_empty(&p->per_device_data));
-}
-
 /* Create specific handle mapped to mem from process local memory idr
  * Assumes that the process lock is held.
  */
@@ -1561,11 +1549,13 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
  */
 int kfd_process_evict_queues(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
        int r = 0;
+       int i;
        unsigned int n_evicted = 0;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
@@ -1581,7 +1571,9 @@ fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
@@ -1597,10 +1589,12 @@ fail:
 /* kfd_process_restore_queues - Restore all user queues of a process */
 int kfd_process_restore_queues(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
        int r, ret = 0;
+       int i;
+
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
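
kfd_process_evict_queues() follows a count-and-roll-back pattern: it evicts
queues device by device, tallying successes in n_evicted, and on failure
restores exactly the devices already evicted so queue state stays consistent.
A generic sketch of that pattern (the evict/restore bodies are placeholders):

#include <stdbool.h>
#include <stdio.h>

static bool evict(int dev)   { printf("evict %d\n", dev);   return dev != 2; }
static void restore(int dev) { printf("restore %d\n", dev); }

static int evict_all(int n_devs)
{
	int i, n_evicted = 0;

	for (i = 0; i < n_devs; i++) {
		if (!evict(i))
			goto fail;
		n_evicted++;
	}
	return 0;
fail:
	for (i = 0; i < n_evicted; i++)	/* roll back the partial eviction */
		restore(i);
	return -1;
}

int main(void)
{
	evict_all(4);
	return 0;
}
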
index eb1635ac89887c18534e1ce1e969469386fcbf45..95a6c36cea4c6af24d7e50fffaf8a864bcf50e1a 100644 (file)
@@ -126,10 +126,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
 
 void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               kfd_process_dequeue_from_device(pdd);
+       for (i = 0; i < p->n_pdds; i++)
+               kfd_process_dequeue_from_device(p->pdds[i]);
 }
 
 int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
index 17d1736367ea3e7ab9dddb123594434b57504b2e..2465224235593ce8660f78375941148541ab9565 100644 (file)
@@ -81,7 +81,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
        struct kfd_smi_client *client = filep->private_data;
        unsigned char *buf;
 
-       buf = kmalloc(MAX_KFIFO_SIZE * sizeof(*buf), GFP_KERNEL);
+       buf = kmalloc_array(MAX_KFIFO_SIZE, sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
index 00edf78975b1b775954377c0b6d2282235e04b6b..a0c8c41e4e57ee204cdfb6d629b4a660c2cb273d 100644 (file)
@@ -121,7 +121,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
  * DOC: overview
  *
  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
- * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
  * requests into DC requests, and DC responses into DRM responses.
  *
  * The root control structure is &struct amdgpu_display_manager.
@@ -130,6 +130,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 /* basic init/fini API */
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
 
 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
 {
@@ -371,14 +372,14 @@ static void dm_pflip_high_irq(void *interrupt_params)
        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
-               DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+               DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
@@ -449,9 +450,9 @@ static void dm_pflip_high_irq(void *interrupt_params)
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
-       DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
-                        amdgpu_crtc->crtc_id, amdgpu_crtc,
-                        vrr_active, (int) !e);
+       DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+                    amdgpu_crtc->crtc_id, amdgpu_crtc,
+                    vrr_active, (int) !e);
 }
 
 static void dm_vupdate_high_irq(void *interrupt_params)
@@ -459,6 +460,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
+       struct drm_device *drm_dev;
+       struct drm_vblank_crtc *vblank;
+       ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;
 
@@ -466,8 +470,19 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 
        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+               drm_dev = acrtc->base.dev;
+               vblank = &drm_dev->vblank[acrtc->base.index];
+               previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+               frame_duration_ns = vblank->time - previous_timestamp;
 
-               DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+               if (frame_duration_ns > 0) {
+                       trace_amdgpu_refresh_rate_track(acrtc->base.index,
+                                               frame_duration_ns,
+                                               ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+                       atomic64_set(&irq_params->previous_timestamp, vblank->time);
+               }
+
+               DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);
 
@@ -520,7 +535,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
 
        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
 
-       DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+       DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);
 
        /**
@@ -923,6 +938,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DMUB_TRACE_MAX_READ 64
+static void dm_dmub_trace_high_irq(void *interrupt_params)
+{
+       struct common_irq_params *irq_params = interrupt_params;
+       struct amdgpu_device *adev = irq_params->adev;
+       struct amdgpu_display_manager *dm = &adev->dm;
+       struct dmcub_trace_buf_entry entry = { 0 };
+       uint32_t count = 0;
+
+       do {
+               if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+                       trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+                                                       entry.param0, entry.param1);
+
+                       DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+                                entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+               } else
+                       break;
+
+               count++;
+
+       } while (count < DMUB_TRACE_MAX_READ);
+
+       ASSERT(count <= DMUB_TRACE_MAX_READ);
+}
+
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
        uint64_t pt_base;
@@ -987,13 +1028,12 @@ static void event_mall_stutter(struct work_struct *work)
 
        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
-       else
+       else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;
 
        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
 
-       DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
-
+       DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
 
        mutex_unlock(&dm->dc_lock);
 }
@@ -1809,8 +1849,8 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
-                       DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
-                                 acrtc->crtc_id, enable ? "en" : "dis", rc);
+                       DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+                                     acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");
@@ -3104,6 +3144,28 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
        }
 
+       if (dc->ctx->dmub_srv) {
+               i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
+               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
+
+               if (r) {
+                       DRM_ERROR("Failed to add dmub trace irq id!\n");
+                       return r;
+               }
+
+               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+               int_params.irq_source =
+                       dc_interrupt_to_irq_source(dc, i, 0);
+
+               c_irq_params = &adev->dm.dmub_trace_params[0];
+
+               c_irq_params->adev = adev;
+               c_irq_params->irq_src = int_params.irq_source;
+
+               amdgpu_dm_irq_register_interrupt(adev, &int_params,
+                               dm_dmub_trace_high_irq, c_irq_params);
+       }
+
        /* HPD */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
@@ -4892,8 +4954,8 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
        stream->src = src;
        stream->dst = dst;
 
-       DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
-                       dst.x, dst.y, dst.width, dst.height);
+       DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
+                     dst.x, dst.y, dst.width, dst.height);
 
 }
 
@@ -5106,15 +5168,27 @@ static void fill_stream_properties_from_drm_display_mode(
                timing_out->hdmi_vic = hv_frame.vic;
        }
 
-       timing_out->h_addressable = mode_in->hdisplay;
-       timing_out->h_total = mode_in->htotal;
-       timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
-       timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
-       timing_out->v_total = mode_in->vtotal;
-       timing_out->v_addressable = mode_in->vdisplay;
-       timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
-       timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
-       timing_out->pix_clk_100hz = mode_in->clock * 10;
+       if (is_freesync_video_mode(mode_in, aconnector)) {
+               timing_out->h_addressable = mode_in->hdisplay;
+               timing_out->h_total = mode_in->htotal;
+               timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+               timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+               timing_out->v_total = mode_in->vtotal;
+               timing_out->v_addressable = mode_in->vdisplay;
+               timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+               timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+               timing_out->pix_clk_100hz = mode_in->clock * 10;
+       } else {
+               timing_out->h_addressable = mode_in->crtc_hdisplay;
+               timing_out->h_total = mode_in->crtc_htotal;
+               timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+               timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+               timing_out->v_total = mode_in->crtc_vtotal;
+               timing_out->v_addressable = mode_in->crtc_vdisplay;
+               timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+               timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+               timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+       }
 
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
 
@@ -5234,9 +5308,14 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 static void set_multisync_trigger_params(
                struct dc_stream_state *stream)
 {
+       struct dc_stream_state *master = NULL;
+
        if (stream->triggered_crtc_reset.enabled) {
-               stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
-               stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
+               master = stream->triggered_crtc_reset.event_source;
+               stream->triggered_crtc_reset.event =
+                       master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+                       CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+               stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
        }
 }
 
@@ -5266,6 +5345,7 @@ static void set_master_stream(struct dc_stream_state *stream_set[],
 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
 {
        int i = 0;
+       struct dc_stream_state *stream;
 
        if (context->stream_count < 2)
                return;
@@ -5277,9 +5357,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
                 * crtc_sync_master.multi_sync_enabled flag
                 * For now it's set to false
                 */
-               set_multisync_trigger_params(context->streams[i]);
        }
+
        set_master_stream(context->streams, context->stream_count);
+
+       for (i = 0; i < context->stream_count ; i++) {
+               stream = context->streams[i];
+
+               if (!stream)
+                       continue;
+
+               set_multisync_trigger_params(stream);
+       }
 }
 
 static struct drm_display_mode *
@@ -5335,7 +5424,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
        return m_pref;
 }
 
-static bool is_freesync_video_mode(struct drm_display_mode *mode,
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector)
 {
        struct drm_display_mode *high_mode;
@@ -5458,7 +5547,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (recalculate_timing)
                drm_mode_set_crtcinfo(&saved_mode, 0);
-       else
+       else if (!dm_state)
                drm_mode_set_crtcinfo(&mode, 0);
 
        /*
@@ -5636,8 +5725,8 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 
        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
 
-       DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
-                        acrtc->crtc_id, enable ? "en" : "dis", rc);
+       DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+                     acrtc->crtc_id, enable ? "en" : "dis", rc);
        return rc;
 }
 
@@ -6075,6 +6164,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        } while (stream == NULL && requested_bpc >= 6);
 
+       if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+               DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+               aconnector->force_yuv420_output = true;
+               stream = create_validate_stream_for_sink(aconnector, drm_mode,
+                                               dm_state, old_stream);
+               aconnector->force_yuv420_output = false;
+       }
+
        return stream;
 }
 
@@ -6577,7 +6675,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        int r;
 
        if (!new_state->fb) {
-               DRM_DEBUG_DRIVER("No FB bound\n");
+               DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }
 
@@ -7295,7 +7393,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
 
        if (!(amdgpu_freesync_vid_mode && edid))
                return;
-       
+
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                amdgpu_dm_connector->num_modes +=
                        add_fs_modes(amdgpu_dm_connector);
@@ -7810,11 +7908,11 @@ static void handle_cursor_update(struct drm_plane *plane,
        if (!plane->state->fb && !old_plane_state->fb)
                return;
 
-       DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
-                        __func__,
-                        amdgpu_crtc->crtc_id,
-                        plane->state->crtc_w,
-                        plane->state->crtc_h);
+       DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
+                     __func__,
+                     amdgpu_crtc->crtc_id,
+                     plane->state->crtc_w,
+                     plane->state->crtc_h);
 
        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
@@ -7872,8 +7970,8 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;
 
-       DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
-                                                acrtc->crtc_id);
+       DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+                    acrtc->crtc_id);
 }
 
 static void update_freesync_state_on_stream(
@@ -8179,7 +8277,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        &bundle->flip_addrs[planes_count].address,
                        afb->tmz_surface, false);
 
-               DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+               DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
                                 new_plane_state->plane->index,
                                 bundle->plane_infos[planes_count].dcc.enable);
 
@@ -8213,7 +8311,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                dc_plane,
                                bundle->flip_addrs[planes_count].flip_timestamp_in_us);
 
-               DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+               DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
                                 __func__,
                                 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
                                 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
@@ -8535,7 +8633,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
-               DRM_DEBUG_DRIVER(
+               DRM_DEBUG_ATOMIC(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
@@ -8569,7 +8667,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
 
-                       DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+                       DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
 
                        if (!dm_new_crtc_state->stream) {
                                /*
@@ -8602,7 +8700,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        crtc->hwmode = new_crtc_state->mode;
                        mode_set_reset_required = true;
                } else if (modereset_required(new_crtc_state)) {
-                       DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+                       DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -8619,6 +8717,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               /* Allow idle optimization when vblank count is 0 for display off */
+               if (dm->active_vblank_irq_count == 0)
+                       dc_allow_idle_optimizations(dm->dc, true);
+#endif
                mutex_unlock(&dm->dc_lock);
        }
 
@@ -9207,7 +9310,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;
 
-       DRM_DEBUG_DRIVER(
+       DRM_DEBUG_ATOMIC(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
@@ -9291,8 +9394,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 
                        dc_stream_retain(new_stream);
 
-                       DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
-                                               crtc->base.id);
+                       DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+                                        crtc->base.id);
 
                        if (dc_add_stream_to_ctx(
                                        dm->dc,
@@ -9637,8 +9740,8 @@ static int dm_update_plane_state(struct dc *dc,
                if (!dc_new_plane_state)
                        return -ENOMEM;
 
-               DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
-                               plane->base.id, new_plane_crtc->base.id);
+               DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+                                plane->base.id, new_plane_crtc->base.id);
 
                ret = fill_dc_plane_attributes(
                        drm_to_adev(new_plane_crtc->dev),
@@ -9701,7 +9804,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
        new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
-       if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
+       if (!new_cursor_state || !new_primary_state ||
+           !new_cursor_state->fb || !new_primary_state->fb) {
                return 0;
        }
 
index 8f98d44490aa0748e945df0d7e0fa69afa79e5ad..018943113025d36b8c894219fe48581cf70a8978 100644 (file)
@@ -66,6 +66,7 @@ struct dc_plane_state;
 struct common_irq_params {
        struct amdgpu_device *adev;
        enum dc_irq_source irq_src;
+       atomic64_t previous_timestamp;
 };
 
 /**
@@ -339,6 +340,15 @@ struct amdgpu_display_manager {
        struct common_irq_params
        vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
 
+       /**
+        * @dmub_trace_params:
+        *
+        * DMUB trace event IRQ parameters, passed to registered handlers when
+        * triggered.
+        */
+       struct common_irq_params
+       dmub_trace_params[1];
+
        spinlock_t irq_handler_list_table_lock;
 
        struct backlight_device *backlight_dev;
@@ -385,6 +395,11 @@ struct amdgpu_display_manager {
 #endif
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       /**
+        * @crc_rd_wrk:
+        *
+        * Work to be executed in a separate thread to communicate with PSP.
+        */
        struct crc_rd_work *crc_rd_wrk;
 #endif
 
index c6d6baab106ed1205061544997c640437f1eda12..5cd788b20c216816d8d3b2f397165a895d8bbf9e 100644 (file)
@@ -307,7 +307,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                        goto cleanup;
                }
 
-               aux = &aconn->dm_dp_aux.aux;
+               aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux;
 
                if (!aux) {
                        DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
index 927de7678a4f8742109d926261a0d7980d63700a..9a13f47022df905ad2f726c18496313428d0d256 100644 (file)
@@ -34,6 +34,7 @@
 #include "resource.h"
 #include "dsc.h"
 #include "dc_link_dp.h"
+#include "link_hwss.h"
 #include "dc/dc_dmub_srv.h"
 
 struct dmub_debugfs_trace_header {
@@ -149,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
  *
  * --- to get dp configuration
  *
- * cat link_settings
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
  *
  * It will list current, verified, reported, preferred dp configuration.
  * current -- for current video mode
@@ -162,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
  * echo <lane_count>  <link_rate> > link_settings
  *
 * for example, to force 4 lanes at 2.7GHz,
- * echo 4 0xa > link_settings
+ * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
  *
 * spread_spectrum cannot be changed dynamically.
  *
@@ -170,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
 * done. Please check the link settings after the force operation to see if
 * the HW got programmed.
  *
- * cat link_settings
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
  *
  * check current and preferred settings.
  *
@@ -246,7 +247,6 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
 {
        struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
        struct dc_link *link = connector->dc_link;
-       struct dc *dc = (struct dc *)link->dc;
        struct dc_link_settings prefer_link_settings;
        char *wr_buf = NULL;
        const uint32_t wr_buf_size = 40;
@@ -254,7 +254,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        int max_param_num = 2;
        uint8_t param_nums = 0;
        long param[2];
-       bool valid_input = false;
+       bool valid_input = true;
 
        if (size == 0)
                return -EINVAL;
@@ -281,9 +281,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        case LANE_COUNT_ONE:
        case LANE_COUNT_TWO:
        case LANE_COUNT_FOUR:
-               valid_input = true;
                break;
        default:
+               valid_input = false;
                break;
        }
 
@@ -293,9 +293,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        case LINK_RATE_RBR2:
        case LINK_RATE_HIGH2:
        case LINK_RATE_HIGH3:
-               valid_input = true;
                break;
        default:
+               valid_input = false;
                break;
        }
 
@@ -309,10 +309,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
         * spread spectrum will not be changed
         */
        prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+       prefer_link_settings.use_link_rate_set = false;
        prefer_link_settings.lane_count = param[0];
        prefer_link_settings.link_rate = param[1];
 
-       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+       dp_retrain_link_dp_test(link, &prefer_link_settings, false);
 
        kfree(wr_buf);
        return size;
@@ -399,6 +400,70 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
        return result;
 }
 
+static int dp_lttpr_status_show(struct seq_file *m, void *d)
+{
+       char *data;
+       struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private;
+       struct dc_link *link = connector->dc_link;
+       uint32_t read_size = 1;
+       uint8_t repeater_count = 0;
+
+       data = kzalloc(read_size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size);
+
+       switch ((uint8_t)*data) {
+       case 0x80:
+               repeater_count = 1;
+               break;
+       case 0x40:
+               repeater_count = 2;
+               break;
+       case 0x20:
+               repeater_count = 3;
+               break;
+       case 0x10:
+               repeater_count = 4;
+               break;
+       case 0x8:
+               repeater_count = 5;
+               break;
+       case 0x4:
+               repeater_count = 6;
+               break;
+       case 0x2:
+               repeater_count = 7;
+               break;
+       case 0x1:
+               repeater_count = 8;
+               break;
+       case 0x0:
+               repeater_count = 0;
+               break;
+       default:
+               repeater_count = (uint8_t)*data;
+               break;
+       }
+
+       seq_printf(m, "phy repeater count: %d\n", repeater_count);
+
+       dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size);
+
+       if ((uint8_t)*data == 0x55)
+               seq_printf(m, "phy repeater mode: transparent\n");
+       else if ((uint8_t)*data == 0xAA)
+               seq_printf(m, "phy repeater mode: non-transparent\n");
+       else if ((uint8_t)*data == 0x00)
+               seq_printf(m, "phy repeater mode: non lttpr\n");
+       else
+               seq_printf(m, "phy repeater mode: read error\n");
+
+       kfree(data);
+       return 0;
+}
+
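
The switch in dp_lttpr_status_show() decodes DPCD register 0xF0002 (PHY_REPEATER_CNT), which is one-hot encoded MSB-first: 0x80 means one repeater, 0x01 means eight, 0x00 means none. A hedged alternative using the kernel's bit helpers (the helper name is illustrative, not part of this patch):

#include <linux/bitops.h>	/* hweight8() */
#include <linux/log2.h>		/* ilog2() */

/* Hypothetical decode helper: returns 1..8 for a valid one-hot count,
 * 0 for "no repeaters" or a malformed (non-one-hot) raw value.
 */
static uint8_t dp_decode_lttpr_repeater_count(uint8_t cnt)
{
	if (cnt == 0 || hweight8(cnt) != 1)
		return 0;

	return 8 - ilog2(cnt);
}
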
 static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
                                 size_t size, loff_t *pos)
 {
@@ -2300,6 +2365,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
 DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
+DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
 #endif
@@ -2420,6 +2486,7 @@ static const struct {
 } dp_debugfs_entries[] = {
                {"link_settings", &dp_link_settings_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
+               {"lttpr_status", &dp_lttpr_status_fops},
                {"test_pattern", &dp_phy_test_pattern_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
                {"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -2900,6 +2967,10 @@ static int mst_topo_show(struct seq_file *m, void *unused)
 
                aconnector = to_amdgpu_dm_connector(connector);
 
+               /* Ensure we're only dumping the topology of a root mst node */
+               if (!aconnector->mst_mgr.mst_state)
+                       continue;
+
                seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
                drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
        }
@@ -2909,7 +2980,73 @@ static int mst_topo_show(struct seq_file *m, void *unused)
 }
 
 /*
- * Sets the force_timing_sync debug optino from the given string.
+ * Sets the HPD trigger for MST topologies.
+ * A value of 1 rediscovers and restarts all connected MST topologies as needed.
+ * A value of 0 tears down all MST topologies.
+ * Usage to enable topologies: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ * Usage to disable topologies: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_set(void *data, u64 val)
+{
+       struct amdgpu_device *adev = data;
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_connector_list_iter iter;
+       struct amdgpu_dm_connector *aconnector;
+       struct drm_connector *connector;
+       struct dc_link *link = NULL;
+
+       if (val == 1) {
+               drm_connector_list_iter_begin(dev, &iter);
+               drm_for_each_connector_iter(connector, &iter) {
+                       aconnector = to_amdgpu_dm_connector(connector);
+                       if (aconnector->dc_link->type == dc_connection_mst_branch &&
+                           aconnector->mst_mgr.aux) {
+                               dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+                               drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+                       }
+               }
+       } else if (val == 0) {
+               drm_connector_list_iter_begin(dev, &iter);
+               drm_for_each_connector_iter(connector, &iter) {
+                       aconnector = to_amdgpu_dm_connector(connector);
+                       if (!aconnector->dc_link)
+                               continue;
+
+                       if (!(aconnector->port && aconnector->mst_port))
+                               continue;
+
+                       link = aconnector->dc_link;
+                       dp_receiver_power_ctrl(link, false);
+                       drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
+                       link->mst_stream_alloc_table.stream_count = 0;
+                       memset(link->mst_stream_alloc_table.stream_allocations, 0,
+                                       sizeof(link->mst_stream_alloc_table.stream_allocations));
+               }
+       } else {
+               return 0;
+       }
+       drm_kms_helper_hotplug_event(dev);
+
+       return 0;
+}
+
+/*
+ * The interface doesn't need a get function, so reads always return
+ * zero.
+ * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_get(void *data, u64 *val)
+{
+       *val = 0;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get,
+                        trigger_hpd_mst_set, "%llu\n");
+
+
+/*
+ * Sets the force_timing_sync debug option from the given string.
  * All connected displays will be force synchronized immediately.
  * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
  */
@@ -2972,6 +3109,64 @@ DEFINE_SHOW_ATTRIBUTE(mst_topo);
 DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
                         visual_confirm_set, "%llu\n");
 
+/*
+ * Dumps the DCC_EN bit for each pipe.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en
+ */
+static ssize_t dcc_en_bits_read(
+       struct file *f,
+       char __user *buf,
+       size_t size,
+       loff_t *pos)
+{
+       struct amdgpu_device *adev = file_inode(f)->i_private;
+       struct dc *dc = adev->dm.dc;
+       char *rd_buf = NULL;
+       const uint32_t rd_buf_size = 32;
+       uint32_t result = 0;
+       int offset = 0;
+       int num_pipes = dc->res_pool->pipe_count;
+       int *dcc_en_bits;
+       int i, r;
+
+       dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL);
+       if (!dcc_en_bits)
+               return -ENOMEM;
+
+       if (!dc->hwss.get_dcc_en_bits) {
+               kfree(dcc_en_bits);
+               return 0;
+       }
+
+       dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+       if (!rd_buf) {
+               kfree(dcc_en_bits);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_pipes; i++)
+               offset += snprintf(rd_buf + offset, rd_buf_size - offset,
+                                  "%d  ", dcc_en_bits[i]);
+       rd_buf[strlen(rd_buf)] = '\n';
+
+       kfree(dcc_en_bits);
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
 void dtn_debugfs_init(struct amdgpu_device *adev)
 {
        static const struct file_operations dtn_log_fops = {
@@ -2980,6 +3175,11 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
                .write = dtn_log_write,
                .llseek = default_llseek
        };
+       static const struct file_operations dcc_en_bits_fops = {
+               .owner = THIS_MODULE,
+               .read = dcc_en_bits_read,
+               .llseek = default_llseek
+       };
 
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
@@ -3007,4 +3207,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
 
        debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
                                   adev, &dmcub_trace_event_state_fops);
+
+       debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root,
+                                  adev, &trigger_hpd_mst_ops);
+
+       debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
+                                  &dcc_en_bits_fops);
 }
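
The two new entries can be exercised from user space once debugfs is mounted. A minimal, self-contained reader for the DCC file (assuming DRI minor 0, debugfs at /sys/kernel/debug, and root privileges):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_dm_dcc_en", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("DCC_EN per pipe: %s", buf);
	}

	close(fd);
	return 0;
}
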
index 0cdbfcd475ec90bf3c4d036ccdf512422fb2c116..60f91853bd82e715b8df4df85b8164255176857b 100644 (file)
@@ -191,7 +191,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
                                psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
                                            &hdcp_work->srm_version);
 
-                       display->adjust.disable = 0;
+                       display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
                        if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
                                hdcp_w->link.adjust.hdcp1.disable = 0;
                                hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -203,7 +203,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
                        schedule_delayed_work(&hdcp_w->property_validate_dwork,
                                              msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
                } else {
-                       display->adjust.disable = 1;
+                       display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
                        hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
                        cancel_delayed_work(&hdcp_w->property_validate_dwork);
                }
@@ -456,7 +456,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
        link->dp.assr_enabled = config->assr_enabled;
        link->dp.mst_enabled = config->mst_enabled;
-       display->adjust.disable = 1;
+       display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
        link->adjust.auth_delay = 3;
        link->adjust.hdcp1.disable = 0;
 
index 09bdffb3a09e93c9b230cb58f38e5b198a715923..103e29905b5718d29161dd9851d4ded2806cc6db 100644 (file)
@@ -700,6 +700,14 @@ void dm_helpers_free_gpu_mem(
 
 bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable)
 {
-       // TODO
-       return true;
+       enum dc_irq_source irq_source;
+       bool ret;
+
+       irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+
+       ret = dc_interrupt_set(ctx->dc, irq_source, enable);
+
+       DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
+                        enable ? "en" : "dis", ret);
+       return ret;
 }
index d3c687d07ee66e3330bc22770eb8a275c7c19699..b3ed7e777720435f68217477c75729ec0487d6fa 100644 (file)
@@ -73,6 +73,7 @@
  * @handler_arg: Argument passed to the handler when triggered
  * @dm: DM which this handler belongs to
  * @irq_source: DC interrupt source that this handler is registered for
+ * @work: work struct used to defer interrupt handling to process context
  */
 struct amdgpu_dm_irq_handler_data {
        struct list_head list;
@@ -184,6 +185,55 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
        return hnd_list;
 }
 
+/**
+ * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ *
+ * Go through low and high context IRQ tables and deallocate handlers.
+ */
+static void unregister_all_irq_handlers(struct amdgpu_device *adev)
+{
+       struct list_head *hnd_list_low;
+       struct list_head *hnd_list_high;
+       struct list_head *entry, *tmp;
+       struct amdgpu_dm_irq_handler_data *handler;
+       unsigned long irq_table_flags;
+       int i;
+
+       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+       for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
+               hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
+               hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];
+
+               list_for_each_safe(entry, tmp, hnd_list_low) {
+
+                       handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+                                            list);
+
+                       if (handler == NULL || handler->handler == NULL)
+                               continue;
+
+                       list_del(&handler->list);
+                       kfree(handler);
+               }
+
+               list_for_each_safe(entry, tmp, hnd_list_high) {
+
+                       handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+                                            list);
+
+                       if (handler == NULL || handler->handler == NULL)
+                               continue;
+
+                       list_del(&handler->list);
+                       kfree(handler);
+               }
+       }
+
+       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
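
unregister_all_irq_handlers() walks both lists with list_for_each_safe() plus an explicit list_entry(). A behavior-equivalent sketch using the typed iterator, which is the more idiomatic form (an entry returned by list_for_each_entry_safe() can never be NULL, so only the callback check survives):

	struct amdgpu_dm_irq_handler_data *handler, *tmp;

	list_for_each_entry_safe(handler, tmp, hnd_list_low, list) {
		if (!handler->handler)
			continue;

		list_del(&handler->list);
		kfree(handler);
	}
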
 static bool
 validate_irq_registration_params(struct dc_interrupt_params *int_params,
                                 void (*ih)(void *))
@@ -414,6 +464,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
                        }
                }
        }
+       /* Deallocate handlers from the table. */
+       unregister_all_irq_handlers(adev);
 }
 
 int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
@@ -731,6 +783,18 @@ static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
                __func__);
 }
 
+static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
+                                          struct amdgpu_irq_src *source,
+                                          unsigned int type,
+                                          enum amdgpu_interrupt_state state)
+{
+       enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+       bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+       dc_interrupt_set(adev->dm.dc, irq_source, st);
+       return 0;
+}
+
 static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
        .set = amdgpu_dm_set_crtc_irq_state,
        .process = amdgpu_dm_irq_handler,
@@ -746,6 +810,11 @@ static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
        .process = amdgpu_dm_irq_handler,
 };
 
+static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
+       .set = amdgpu_dm_set_dmub_trace_irq_state,
+       .process = amdgpu_dm_irq_handler,
+};
+
 static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
        .set = amdgpu_dm_set_pflip_irq_state,
        .process = amdgpu_dm_irq_handler,
@@ -768,6 +837,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
        adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
        adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
 
+       adev->dmub_trace_irq.num_types = 1;
+       adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;
+
        adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
        adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
 
index 607ec09994456b519fadc3734ff031c3f43c267d..eba2701216984a2d88547579a95e231fc92ae9a3 100644 (file)
 #include "amdgpu_dm_irq.h"
 #include "amdgpu_pm.h"
 #include "dm_pp_smu.h"
-#include "amdgpu_smu.h"
-
 
 bool dm_pp_apply_display_requirements(
                const struct dc_context *ctx,
                const struct dm_pp_display_configuration *pp_display_cfg)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
        int i;
 
        if (adev->pm.dpm_enabled) {
@@ -106,9 +103,6 @@ bool dm_pp_apply_display_requirements(
                        adev->powerplay.pp_funcs->display_configuration_change(
                                adev->powerplay.pp_handle,
                                &adev->pm.pm_display_cfg);
-               else if (adev->smu.ppt_funcs)
-                       smu_display_configuration_change(smu,
-                                                        &adev->pm.pm_display_cfg);
 
                amdgpu_pm_compute_clocks(adev);
        }
@@ -148,36 +142,6 @@ static void get_default_clock_levels(
        }
 }
 
-static enum smu_clk_type dc_to_smu_clock_type(
-               enum dm_pp_clock_type dm_pp_clk_type)
-{
-       enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
-
-       switch (dm_pp_clk_type) {
-       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-               smu_clk_type = SMU_DISPCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_ENGINE_CLK:
-               smu_clk_type = SMU_GFXCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_MEMORY_CLK:
-               smu_clk_type = SMU_MCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_DCEFCLK:
-               smu_clk_type = SMU_DCEFCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_SOCCLK:
-               smu_clk_type = SMU_SOCCLK;
-               break;
-       default:
-               DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
-                         dm_pp_clk_type);
-               break;
-       }
-
-       return smu_clk_type;
-}
-
 static enum amd_pp_clock_type dc_to_pp_clock_type(
                enum dm_pp_clock_type dm_pp_clk_type)
 {
@@ -417,14 +381,8 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
                                                &pp_clks);
                if (ret)
                        return false;
-       } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
-               if (smu_get_clock_by_type_with_latency(&adev->smu,
-                                                      dc_to_smu_clock_type(clk_type),
-                                                      &pp_clks))
-                       return false;
        }
 
-
        pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 
        return true;
@@ -502,10 +460,6 @@ bool dm_pp_apply_clock_for_voltage_request(
                ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
                        adev->powerplay.pp_handle,
                        &pp_clock_request);
-       else if (adev->smu.ppt_funcs &&
-                adev->smu.ppt_funcs->display_clock_voltage_request)
-               ret = smu_display_clock_voltage_request(&adev->smu,
-                                                       &pp_clock_request);
        if (ret)
                return false;
        return true;
@@ -655,8 +609,11 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
+       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
 
        return PP_SMU_RESULT_OK;
 }
@@ -665,13 +622,14 @@ static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->set_active_display_count)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        /* 0: successful or pp_funcs->set_active_display_count = NULL; 1: fail */
-       if (smu_set_display_count(smu, count))
+       if (pp_funcs->set_active_display_count(pp_handle, count))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -682,13 +640,14 @@ pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        /* 0: successful or pp_funcs->set_min_deep_sleep_dcefclk = NULL; 1: fail */
-       if (smu_set_deep_sleep_dcefclk(smu, mhz))
+       if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -699,10 +658,11 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        clock_req.clock_type = amd_pp_dcef_clock;
@@ -711,7 +671,7 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
        /* 0: successful or pp_funcs->display_clock_voltage_request = NULL
         * 1: fail
         */
-       if (smu_display_clock_voltage_request(smu, &clock_req))
+       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -722,10 +682,11 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        clock_req.clock_type = amd_pp_mem_clock;
@@ -734,7 +695,7 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
        /* 0: successful or pp_funcs->display_clock_voltage_request = NULL
         * 1: fail
         */
-       if (smu_display_clock_voltage_request(smu, &clock_req))
+       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -745,10 +706,14 @@ static enum pp_smu_status pp_nv_set_pstate_handshake_support(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
-               return PP_SMU_RESULT_FAIL;
+       if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) {
+               if (pp_funcs->display_disable_memory_clock_switch(pp_handle,
+                                                                 !pstate_handshake_supported))
+                       return PP_SMU_RESULT_FAIL;
+       }
 
        return PP_SMU_RESULT_OK;
 }
@@ -758,10 +723,11 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        switch (clock_id) {
@@ -782,7 +748,7 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
        /* 0: successful or pp_funcs->display_clock_voltage_request = NULL
         * 1: fail
         */
-       if (smu_display_clock_voltage_request(smu, &clock_req))
+       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -793,15 +759,13 @@ static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+       if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
+       if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks))
                return PP_SMU_RESULT_OK;
 
        return PP_SMU_RESULT_FAIL;
@@ -812,16 +776,15 @@ static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs->get_uclk_dpm_states)
+       if (!pp_funcs || !pp_funcs->get_uclk_dpm_states)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       if (!smu_get_uclk_dpm_states(smu,
-                       clock_values_in_khz, num_states))
+       if (!pp_funcs->get_uclk_dpm_states(pp_handle,
+                                          clock_values_in_khz,
+                                          num_states))
                return PP_SMU_RESULT_OK;
 
        return PP_SMU_RESULT_FAIL;
@@ -832,15 +795,13 @@ static enum pp_smu_status pp_rn_get_dpm_clock_table(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs->get_dpm_clock_table)
+       if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       if (!smu_get_dpm_clock_table(smu, clock_table))
+       if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table))
                return PP_SMU_RESULT_OK;
 
        return PP_SMU_RESULT_FAIL;
@@ -851,8 +812,11 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
+       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
 
        return PP_SMU_RESULT_OK;
 }
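
After the smu_* removal, every pp_nv_*/pp_rn_* helper in this file follows the same dispatch shape. A minimal sketch of the pattern (some_callback and arg are illustrative stand-ins, not real amd_pm_funcs members):

static enum pp_smu_status pp_nv_example(struct pp_smu *pp, int arg)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	/* NULL-check both the table and the specific callback before
	 * dispatching through the opaque powerplay handle.
	 */
	if (!pp_funcs || !pp_funcs->some_callback)
		return PP_SMU_RESULT_UNSUPPORTED;

	if (pp_funcs->some_callback(adev->powerplay.pp_handle, arg))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}
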
index 86960476823ce919da189a5b0401f13b15ccf900..46a33f64cf8ecfe34dcaa5acb0972958c64452fd 100644 (file)
@@ -597,6 +597,46 @@ TRACE_EVENT(amdgpu_dm_dce_clocks_state,
            )
 );
 
+TRACE_EVENT(amdgpu_dmub_trace_high_irq,
+       TP_PROTO(uint32_t trace_code, uint32_t tick_count, uint32_t param0,
+                uint32_t param1),
+       TP_ARGS(trace_code, tick_count, param0, param1),
+       TP_STRUCT__entry(
+               __field(uint32_t, trace_code)
+               __field(uint32_t, tick_count)
+               __field(uint32_t, param0)
+               __field(uint32_t, param1)
+               ),
+       TP_fast_assign(
+               __entry->trace_code = trace_code;
+               __entry->tick_count = tick_count;
+               __entry->param0 = param0;
+               __entry->param1 = param1;
+       ),
+       TP_printk("trace_code=%u tick_count=%u param0=%u param1=%u",
+                 __entry->trace_code, __entry->tick_count,
+                 __entry->param0, __entry->param1)
+);
+
+TRACE_EVENT(amdgpu_refresh_rate_track,
+       TP_PROTO(int crtc_index, ktime_t refresh_rate_ns, uint32_t refresh_rate_hz),
+       TP_ARGS(crtc_index, refresh_rate_ns, refresh_rate_hz),
+       TP_STRUCT__entry(
+               __field(int, crtc_index)
+               __field(ktime_t, refresh_rate_ns)
+               __field(uint32_t, refresh_rate_hz)
+               ),
+       TP_fast_assign(
+               __entry->crtc_index = crtc_index;
+               __entry->refresh_rate_ns = refresh_rate_ns;
+               __entry->refresh_rate_hz = refresh_rate_hz;
+       ),
+       TP_printk("crtc_index=%d refresh_rate=%uHz (%lld)",
+                 __entry->crtc_index,
+                 __entry->refresh_rate_hz,
+                 __entry->refresh_rate_ns)
+);
+
 #endif /* _AMDGPU_DM_TRACE_H_ */
 
 #undef TRACE_INCLUDE_PATH
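
Events declared with TRACE_EVENT() are emitted through a generated trace_<name>() call. A hedged sketch of a call site for the new refresh-rate event (variable names are illustrative; the actual call sites are not in this hunk):

	/* Hypothetical call site: log the measured vblank period and the
	 * refresh rate derived from it for one CRTC.
	 */
	trace_amdgpu_refresh_rate_track(acrtc->crtc_id, vblank_period_ns,
					div64_u64(NSEC_PER_SEC, vblank_period_ns));
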
index bbde6e6a4e4350a7ac00bb6fe8a8fbff48b21d8e..f33847299bca2a63b077fa4b8c7cc95059ce54f8 100644 (file)
@@ -54,8 +54,9 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
 
 include $(AMD_DC)
 
-DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
+dc_link_enc_cfg.o
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 DISPLAY_CORE += dc_vm_helper.o
index b208f06ed5149121dc394d9e42de1f38cc714333..d79f4fe06c47ed2fb1b74742cc1b564ed1c661c4 100644 (file)
@@ -916,6 +916,192 @@ static enum bp_result bios_parser_get_soc_bb_info(
        return result;
 }
 
+static enum bp_result get_disp_caps_v4_1(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result get_disp_caps_v4_2(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result get_disp_caps_v4_3(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_3 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_3,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result get_disp_caps_v4_4(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result bios_parser_get_lttpr_interop(
+       struct dc_bios *dcb,
+       uint8_t *dce_caps)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result = BP_RESULT_UNSUPPORTED;
+       struct atom_common_table_header *header;
+       struct atom_data_revision tbl_revision;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_UNSUPPORTED;
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                               DATA_TABLES(dce_info));
+       get_atom_data_table_revision(header, &tbl_revision);
+       switch (tbl_revision.major) {
+       case 4:
+               switch (tbl_revision.minor) {
+               case 1:
+                       result = get_disp_caps_v4_1(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               case 2:
+                       result = get_disp_caps_v4_2(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               case 3:
+                       result = get_disp_caps_v4_3(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               case 4:
+                       result = get_disp_caps_v4_4(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return result;
+}
+
+static enum bp_result bios_parser_get_lttpr_caps(
+       struct dc_bios *dcb,
+       uint8_t *dce_caps)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result = BP_RESULT_UNSUPPORTED;
+       struct atom_common_table_header *header;
+       struct atom_data_revision tbl_revision;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_UNSUPPORTED;
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                               DATA_TABLES(dce_info));
+       get_atom_data_table_revision(header, &tbl_revision);
+       switch (tbl_revision.major) {
+       case 4:
+               switch (tbl_revision.minor) {
+               case 1:
+                       result = get_disp_caps_v4_1(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               case 2:
+                       result = get_disp_caps_v4_2(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               case 3:
+                       result = get_disp_caps_v4_3(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               case 4:
+                       result = get_disp_caps_v4_4(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return result;
+}
+
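
The four get_disp_caps_v4_* getters above differ only in the atom struct they fetch, and the two switch dispatchers differ only in which capability bit they mask. A hedged consolidation sketch (hypothetical, not part of this patch; relies on this file's DATA_TABLES()/GET_IMAGE() macros):

#define DEFINE_GET_DISP_CAPS(ver)					\
static enum bp_result get_disp_caps_v4_##ver(struct bios_parser *bp,	\
					     uint8_t *dce_caps)		\
{									\
	struct atom_display_controller_info_v4_##ver *tbl;		\
									\
	if (!dce_caps)							\
		return BP_RESULT_BADINPUT;				\
	if (!DATA_TABLES(dce_info))					\
		return BP_RESULT_BADBIOSTABLE;				\
	tbl = GET_IMAGE(struct atom_display_controller_info_v4_##ver,	\
			DATA_TABLES(dce_info));				\
	if (!tbl)							\
		return BP_RESULT_BADBIOSTABLE;				\
	*dce_caps = tbl->display_caps;					\
	return BP_RESULT_OK;						\
}
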
 static enum bp_result get_embedded_panel_info_v2_1(
                struct bios_parser *bp,
                struct embedded_panel_info *info)
@@ -2531,6 +2717,10 @@ static const struct dc_vbios_funcs vbios_funcs = {
        .get_soc_bb_info = bios_parser_get_soc_bb_info,
 
        .get_disp_connector_caps_info = bios_parser_get_disp_connector_caps_info,
+
+       .get_lttpr_caps = bios_parser_get_lttpr_caps,
+
+       .get_lttpr_interop = bios_parser_get_lttpr_interop,
 };
 
 static bool bios_parser2_construct(
index e633f8a51edb6b810cdd549d48d5982634b85bc6..1244fcb0f446dafb0d014a2fec137ab5c1d4e294 100644 (file)
@@ -98,16 +98,16 @@ static void calculate_bandwidth(
        int32_t num_cursor_lines;
 
        int32_t i, j, k;
-       struct bw_fixed yclk[3];
-       struct bw_fixed sclk[8];
+       struct bw_fixed *yclk;
+       struct bw_fixed *sclk;
        bool d0_underlay_enable;
        bool d1_underlay_enable;
        bool fbc_enabled;
        bool lpt_enabled;
        enum bw_defines sclk_message;
        enum bw_defines yclk_message;
-       enum bw_defines tiling_mode[maximum_number_of_surfaces];
-       enum bw_defines surface_type[maximum_number_of_surfaces];
+       enum bw_defines *tiling_mode;
+       enum bw_defines *surface_type;
        enum bw_defines voltage;
        enum bw_defines pipe_check;
        enum bw_defines hsr_check;
@@ -122,6 +122,22 @@ static void calculate_bandwidth(
        int32_t number_of_displays_enabled_with_margin = 0;
        int32_t number_of_aligned_displays_with_no_margin = 0;
 
+       yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL);
+       if (!yclk)
+               return;
+
+       sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL);
+       if (!sclk)
+               goto free_yclk;
+
+       tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL);
+       if (!tiling_mode)
+               goto free_sclk;
+
+       surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL);
+       if (!surface_type)
+               goto free_tiling_mode;
+
        yclk[low] = vbios->low_yclk;
        yclk[mid] = vbios->mid_yclk;
        yclk[high] = vbios->high_yclk;
@@ -2013,6 +2029,14 @@ static void calculate_bandwidth(
                        }
                }
        }
+
+       kfree(surface_type);
+free_tiling_mode:
+       kfree(tiling_mode);
+free_sclk:
+       kfree(sclk);
+free_yclk:
+       kfree(yclk);
 }
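
The cleanup above is the kernel's reverse-order goto-ladder idiom: each label frees exactly the allocations that succeeded before the failing one, and the success path falls through the whole ladder. A minimal self-contained sketch of the idiom (names illustrative):

#include <linux/slab.h>

static int alloc_two_buffers(void)
{
	int *a, *b;
	int ret = -ENOMEM;

	a = kcalloc(8, sizeof(*a), GFP_KERNEL);
	if (!a)
		return ret;

	b = kcalloc(8, sizeof(*b), GFP_KERNEL);
	if (!b)
		goto free_a;

	/* ... use a and b ... */
	ret = 0;

	kfree(b);
free_a:	/* labels run in reverse allocation order */
	kfree(a);
	return ret;
}
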
 
 /*******************************************************************************
@@ -2022,707 +2046,719 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
        struct bw_calcs_vbios *bw_vbios,
        struct hw_asic_id asic_id)
 {
-       struct bw_calcs_dceip dceip = { 0 };
-       struct bw_calcs_vbios vbios = { 0 };
+       struct bw_calcs_dceip *dceip;
+       struct bw_calcs_vbios *vbios;
 
        enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
 
-       dceip.version = version;
+       dceip = kzalloc(sizeof(*dceip), GFP_KERNEL);
+       if (!dceip)
+               return;
+
+       vbios = kzalloc(sizeof(*vbios), GFP_KERNEL);
+       if (!vbios) {
+               kfree(dceip);
+               return;
+       }
+
+       dceip->version = version;
 
        switch (version) {
        case BW_CALCS_VERSION_CARRIZO:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 64;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(1600);
-               vbios.mid_yclk = bw_int_to_fixed(1600);
-               vbios.low_yclk = bw_frc_to_fixed(66666, 100);
-               vbios.low_sclk = bw_int_to_fixed(200);
-               vbios.mid1_sclk = bw_int_to_fixed(300);
-               vbios.mid2_sclk = bw_int_to_fixed(300);
-               vbios.mid3_sclk = bw_int_to_fixed(300);
-               vbios.mid4_sclk = bw_int_to_fixed(300);
-               vbios.mid5_sclk = bw_int_to_fixed(300);
-               vbios.mid6_sclk = bw_int_to_fixed(300);
-               vbios.high_sclk = bw_frc_to_fixed(62609, 100);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(50);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
-               vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 3;
-               dceip.number_of_underlay_pipes = 1;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = false;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 64;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(1600);
+               vbios->mid_yclk = bw_int_to_fixed(1600);
+               vbios->low_yclk = bw_frc_to_fixed(66666, 100);
+               vbios->low_sclk = bw_int_to_fixed(200);
+               vbios->mid1_sclk = bw_int_to_fixed(300);
+               vbios->mid2_sclk = bw_int_to_fixed(300);
+               vbios->mid3_sclk = bw_int_to_fixed(300);
+               vbios->mid4_sclk = bw_int_to_fixed(300);
+               vbios->mid5_sclk = bw_int_to_fixed(300);
+               vbios->mid6_sclk = bw_int_to_fixed(300);
+               vbios->high_sclk = bw_frc_to_fixed(62609, 100);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(50);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 3;
+               dceip->number_of_underlay_pipes = 1;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = false;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 2;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 2;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(0);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* TODO: this is a bug */
                break;
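
The bw_int_to_fixed()/bw_frc_to_fixed() helpers used throughout these tables wrap
integers and fractions in the display code's fixed-point type. A minimal standalone
sketch, assuming a 64-bit value with a Q24 fractional part (the field layout and
fractional width are assumptions, not taken from this patch):

    #include <stdint.h>    /* standalone sketch; kernel code has its own types */

    struct bw_fixed_sketch {
            int64_t value;                  /* assumed Qx.24 representation */
    };
    #define SKETCH_FRAC_BITS 24

    static struct bw_fixed_sketch sketch_int_to_fixed(int64_t v)
    {
            struct bw_fixed_sketch f = { .value = v << SKETCH_FRAC_BITS };
            return f;
    }

    static struct bw_fixed_sketch sketch_frc_to_fixed(int64_t num, int64_t den)
    {
            /* sketch_frc_to_fixed(105, 100) encodes roughly 1.05 */
            struct bw_fixed_sketch f = { .value = (num << SKETCH_FRAC_BITS) / den };
            return f;
    }

Under that reading, dispclk_ramping_factor = bw_frc_to_fixed(105, 100) above would
encode a 5% ramping margin on the display clock.
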
        case BW_CALCS_VERSION_POLARIS10:
                /* TODO: Treat VEGAM the same as P10 for now
                 * Need to tune the parameters for VEGAM if needed */
        case BW_CALCS_VERSION_VEGAM:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 32;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(6000);
-               vbios.mid_yclk = bw_int_to_fixed(3200);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(300);
-               vbios.mid1_sclk = bw_int_to_fixed(400);
-               vbios.mid2_sclk = bw_int_to_fixed(500);
-               vbios.mid3_sclk = bw_int_to_fixed(600);
-               vbios.mid4_sclk = bw_int_to_fixed(700);
-               vbios.mid5_sclk = bw_int_to_fixed(800);
-               vbios.mid6_sclk = bw_int_to_fixed(974);
-               vbios.high_sclk = bw_int_to_fixed(1154);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(45);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 6;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 32;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(6000);
+               vbios->mid_yclk = bw_int_to_fixed(3200);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(300);
+               vbios->mid1_sclk = bw_int_to_fixed(400);
+               vbios->mid2_sclk = bw_int_to_fixed(500);
+               vbios->mid3_sclk = bw_int_to_fixed(600);
+               vbios->mid4_sclk = bw_int_to_fixed(700);
+               vbios->mid5_sclk = bw_int_to_fixed(800);
+               vbios->mid6_sclk = bw_int_to_fixed(974);
+               vbios->high_sclk = bw_int_to_fixed(1154);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(45);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 6;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
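
Each case derives the channel count from the board's VRAM width, so one table serves
every memory configuration of an ASIC. A worked example with hypothetical widths
(the boards themselves are not named in this patch):

    unsigned int vram_width = 256;     /* bits, hypothetical Polaris10 board */
    unsigned int channel_width = 32;   /* bits per GDDR5 channel, as above */
    unsigned int channels = vram_width / channel_width;   /* = 8 */

The "== 2" checks in the Polaris11/12 cases below match 64-bit boards (two 32-bit
channels), which get the higher dmifmc urgent latency of 4.
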
        case BW_CALCS_VERSION_POLARIS11:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 32;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(6000);
-               vbios.mid_yclk = bw_int_to_fixed(3200);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(300);
-               vbios.mid1_sclk = bw_int_to_fixed(400);
-               vbios.mid2_sclk = bw_int_to_fixed(500);
-               vbios.mid3_sclk = bw_int_to_fixed(600);
-               vbios.mid4_sclk = bw_int_to_fixed(700);
-               vbios.mid5_sclk = bw_int_to_fixed(800);
-               vbios.mid6_sclk = bw_int_to_fixed(974);
-               vbios.high_sclk = bw_int_to_fixed(1154);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               if (vbios.number_of_dram_channels == 2) // 64-bit
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 32;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(6000);
+               vbios->mid_yclk = bw_int_to_fixed(3200);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(300);
+               vbios->mid1_sclk = bw_int_to_fixed(400);
+               vbios->mid2_sclk = bw_int_to_fixed(500);
+               vbios->mid3_sclk = bw_int_to_fixed(600);
+               vbios->mid4_sclk = bw_int_to_fixed(700);
+               vbios->mid5_sclk = bw_int_to_fixed(800);
+               vbios->mid6_sclk = bw_int_to_fixed(974);
+               vbios->high_sclk = bw_int_to_fixed(1154);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               if (vbios->number_of_dram_channels == 2) // 64-bit
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
                else
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(45);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 5;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(45);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 5;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
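
The '.' to '->' conversion running through this hunk means the init code now writes
through the caller's dceip/vbios pointers directly rather than filling large struct
temporaries and copying them out; a simplified sketch of the assumed before/after
shape (stand-in struct, not copied from the patch):

    struct ip_sketch { unsigned int chunk_width; };

    /* before (assumed): large on-stack temporary, whole-struct copy at the end */
    static void fill_by_copy(struct ip_sketch *out)
    {
            struct ip_sketch ip;    /* the real structs are far larger */

            ip.chunk_width = 256;
            *out = ip;
    }

    /* after: fill the caller's storage in place, no temporary */
    static void fill_in_place(struct ip_sketch *out)
    {
            out->chunk_width = 256;
    }

For structs of this size, the in-place form also keeps the stack frame small, a
common reason for this kind of conversion; the hunk itself does not state the
motivation.
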
        case BW_CALCS_VERSION_POLARIS12:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 32;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(6000);
-               vbios.mid_yclk = bw_int_to_fixed(3200);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(678);
-               vbios.mid1_sclk = bw_int_to_fixed(864);
-               vbios.mid2_sclk = bw_int_to_fixed(900);
-               vbios.mid3_sclk = bw_int_to_fixed(920);
-               vbios.mid4_sclk = bw_int_to_fixed(940);
-               vbios.mid5_sclk = bw_int_to_fixed(960);
-               vbios.mid6_sclk = bw_int_to_fixed(980);
-               vbios.high_sclk = bw_int_to_fixed(1049);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               if (vbios.number_of_dram_channels == 2) // 64-bit
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 32;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(6000);
+               vbios->mid_yclk = bw_int_to_fixed(3200);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(678);
+               vbios->mid1_sclk = bw_int_to_fixed(864);
+               vbios->mid2_sclk = bw_int_to_fixed(900);
+               vbios->mid3_sclk = bw_int_to_fixed(920);
+               vbios->mid4_sclk = bw_int_to_fixed(940);
+               vbios->mid5_sclk = bw_int_to_fixed(960);
+               vbios->mid6_sclk = bw_int_to_fixed(980);
+               vbios->high_sclk = bw_int_to_fixed(1049);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               if (vbios->number_of_dram_channels == 2) // 64-bit
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
                else
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(250);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = false;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 5;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = true;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(250);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = false;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 5;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = true;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
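
Taken together, the yclk and channel fields bound the peak DRAM bandwidth the
calculator may assume. A worked example, under the assumption that yclk is the
effective per-pin data rate in Mb/s (the units are an assumption, not stated here):

    unsigned int yclk = 6000;          /* high_yclk in the GDDR5 cases above */
    unsigned int channels = 8;         /* hypothetical 256-bit board */
    unsigned int channel_bits = 32;
    unsigned int peak = yclk * channels * channel_bits / 8;
    /* = 192000 MB/s, i.e. roughly 192 GB/s of raw DRAM bandwidth */
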
        case BW_CALCS_VERSION_STONEY:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 64;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(1866);
-               vbios.mid_yclk = bw_int_to_fixed(1866);
-               vbios.low_yclk = bw_int_to_fixed(1333);
-               vbios.low_sclk = bw_int_to_fixed(200);
-               vbios.mid1_sclk = bw_int_to_fixed(600);
-               vbios.mid2_sclk = bw_int_to_fixed(600);
-               vbios.mid3_sclk = bw_int_to_fixed(600);
-               vbios.mid4_sclk = bw_int_to_fixed(600);
-               vbios.mid5_sclk = bw_int_to_fixed(600);
-               vbios.mid6_sclk = bw_int_to_fixed(600);
-               vbios.high_sclk = bw_int_to_fixed(800);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(50);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
-               vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 2;
-               dceip.number_of_underlay_pipes = 1;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 64;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(1866);
+               vbios->mid_yclk = bw_int_to_fixed(1866);
+               vbios->low_yclk = bw_int_to_fixed(1333);
+               vbios->low_sclk = bw_int_to_fixed(200);
+               vbios->mid1_sclk = bw_int_to_fixed(600);
+               vbios->mid2_sclk = bw_int_to_fixed(600);
+               vbios->mid3_sclk = bw_int_to_fixed(600);
+               vbios->mid4_sclk = bw_int_to_fixed(600);
+               vbios->mid5_sclk = bw_int_to_fixed(600);
+               vbios->mid6_sclk = bw_int_to_fixed(600);
+               vbios->high_sclk = bw_int_to_fixed(800);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(50);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 2;
+               dceip->number_of_underlay_pipes = 1;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 2;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 2;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(0);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
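
The VEGA10 case below is the first to switch memory_type to HBM, where channels are
128 bits wide and the bank count doubles to 16. With the same width-derived formula,
a hypothetical 2048-bit Vega10 interface (two HBM2 stacks) would give:

    unsigned int vram_width = 2048;    /* bits, hypothetical two-stack HBM2 */
    unsigned int channel_width = 128;  /* bits per HBM channel, as below */
    unsigned int channels = vram_width / channel_width;   /* = 16 */
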
        case BW_CALCS_VERSION_VEGA10:
-               vbios.memory_type = bw_def_hbm;
-               vbios.dram_channel_width_in_bits = 128;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 16;
-               vbios.high_yclk = bw_int_to_fixed(2400);
-               vbios.mid_yclk = bw_int_to_fixed(1700);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(300);
-               vbios.mid1_sclk = bw_int_to_fixed(350);
-               vbios.mid2_sclk = bw_int_to_fixed(400);
-               vbios.mid3_sclk = bw_int_to_fixed(500);
-               vbios.mid4_sclk = bw_int_to_fixed(600);
-               vbios.mid5_sclk = bw_int_to_fixed(700);
-               vbios.mid6_sclk = bw_int_to_fixed(760);
-               vbios.high_sclk = bw_int_to_fixed(776);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
-               vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(39);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = false;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = true;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 6;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = true;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_hbm;
+               vbios->dram_channel_width_in_bits = 128;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 16;
+               vbios->high_yclk = bw_int_to_fixed(2400);
+               vbios->mid_yclk = bw_int_to_fixed(1700);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(300);
+               vbios->mid1_sclk = bw_int_to_fixed(350);
+               vbios->mid2_sclk = bw_int_to_fixed(400);
+               vbios->mid3_sclk = bw_int_to_fixed(500);
+               vbios->mid4_sclk = bw_int_to_fixed(600);
+               vbios->mid5_sclk = bw_int_to_fixed(700);
+               vbios->mid6_sclk = bw_int_to_fixed(760);
+               vbios->high_sclk = bw_int_to_fixed(776);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(460);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+               vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(39);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = false;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(2304);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = true;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 6;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = true;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 24576;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = false;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 24576;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = false;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
        default:
                break;
        }
-       *bw_dceip = dceip;
-       *bw_vbios = vbios;
+       *bw_dceip = *dceip;
+       *bw_vbios = *vbios;
 
+       kfree(dceip);
+       kfree(vbios);
 }
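/* [Editor's note] The hunk above completes moving bw_calcs' two large
 * parameter structs off the kernel stack and onto the heap, presumably to
 * shrink this function's stack frame. A minimal sketch of the pattern, with
 * the hypothetical name populate_bw_params() standing in for the real
 * routine (whose kzalloc calls and early error return sit before this
 * excerpt):
 *
 *	static void populate_bw_params(struct bw_calcs_dceip *bw_dceip,
 *				       struct bw_calcs_vbios *bw_vbios)
 *	{
 *		struct bw_calcs_dceip *dceip = kzalloc(sizeof(*dceip), GFP_KERNEL);
 *		struct bw_calcs_vbios *vbios = kzalloc(sizeof(*vbios), GFP_KERNEL);
 *
 *		if (!dceip || !vbios)
 *			goto out;
 *
 *		dceip->number_of_graphics_pipes = 6;	// ...per-ASIC values...
 *		vbios->cursor_width = 32;
 *
 *		*bw_dceip = *dceip;	// copy back into caller-owned storage
 *		*bw_vbios = *vbios;
 *	out:
 *		kfree(dceip);		// kfree(NULL) is a no-op
 *		kfree(vbios);
 *	}
 */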
 
 /*
index f7c728d4f50a0c75f57cb1a442ea20f57eb4d37b..7d6c68c5dea9c48263e1534c59371d26fcfe63ec 100644 (file)
@@ -125,87 +125,136 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 {
        struct hw_asic_id asic_id = ctx->asic_id;
 
-       struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
-
-       if (clk_mgr == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
        switch (asic_id.chip_family) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
-       case FAMILY_SI:
+       case FAMILY_SI: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                dce60_clk_mgr_construct(ctx, clk_mgr);
-               break;
+               dce_clk_mgr_construct(ctx, clk_mgr);
+               return &clk_mgr->base;
+       }
 #endif
        case FAMILY_CI:
-       case FAMILY_KV:
+       case FAMILY_KV: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                dce_clk_mgr_construct(ctx, clk_mgr);
-               break;
-       case FAMILY_CZ:
+               return &clk_mgr->base;
+       }
+       case FAMILY_CZ: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                dce110_clk_mgr_construct(ctx, clk_mgr);
-               break;
-       case FAMILY_VI:
+               return &clk_mgr->base;
+       }
+       case FAMILY_VI: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
                                ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
                        dce_clk_mgr_construct(ctx, clk_mgr);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
                                ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
                                ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
                        dce112_clk_mgr_construct(ctx, clk_mgr);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
                        dce112_clk_mgr_construct(ctx, clk_mgr);
-                       break;
+                       return &clk_mgr->base;
+               }
+               return &clk_mgr->base;
+       }
+       case FAMILY_AI: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
                }
-               break;
-       case FAMILY_AI:
                if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
                        dce121_clk_mgr_construct(ctx, clk_mgr);
                else
                        dce120_clk_mgr_construct(ctx, clk_mgr);
-               break;
-
+               return &clk_mgr->base;
+       }
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       case FAMILY_RV:
+       case FAMILY_RV: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
+
                if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
                        rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
 
                if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
                        rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
                        rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
                                ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
                        rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
-                       break;
+                       return &clk_mgr->base;
                }
-               break;
+               return &clk_mgr->base;
+       }
+       case FAMILY_NV: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
 
-       case FAMILY_NV:
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
                        dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) {
                        dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
                dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-               break;
-
+               return &clk_mgr->base;
+       }
        case FAMILY_VGH:
-               if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev))
+               if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
+                       struct clk_mgr_vgh *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+                       if (clk_mgr == NULL) {
+                               BREAK_TO_DEBUGGER();
+                               return NULL;
+                       }
                        vg_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+                       return &clk_mgr->base.base;
+               }
                break;
 #endif
        default:
@@ -213,7 +262,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
                break;
        }
 
-       return &clk_mgr->base;
+       return NULL;
 }
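/* [Editor's note] The kzalloc moves from one shared call into every case
 * because the families no longer share a single type: Van Gogh hands back a
 * larger wrapper (struct clk_mgr_vgh, defined in the vg_clk_mgr.h hunk
 * below), so a common kzalloc(sizeof(struct clk_mgr_internal)) would
 * under-allocate for FAMILY_VGH. A sketch of the two shapes, assuming the
 * layout shown later in this patch:
 *
 *	// generic family: allocate exactly the internal struct
 *	struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
 *	if (!clk_mgr)
 *		return NULL;
 *	return &clk_mgr->base;			// up-cast to struct clk_mgr
 *
 *	// Van Gogh: allocate the wrapper, which embeds clk_mgr_internal,
 *	// which in turn embeds clk_mgr
 *	struct clk_mgr_vgh *clk_mgr_vgh = kzalloc(sizeof(*clk_mgr_vgh), GFP_KERNEL);
 *	if (!clk_mgr_vgh)
 *		return NULL;
 *	return &clk_mgr_vgh->base.base;
 *
 * Each case now returns directly, and the shared return at the bottom of the
 * function becomes `return NULL;`, so an unhandled family yields NULL rather
 * than an unconstructed clock manager.
 */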
 
 void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
@@ -226,6 +275,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
                if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
                        dcn3_clk_mgr_destroy(clk_mgr);
                }
+               if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+                       dcn3_clk_mgr_destroy(clk_mgr);
+               }
                break;
 
        case FAMILY_VGH:
index 01b1853b7750dfb05fcdb1cb733d8b33b3f7529e..887a54246bde061ab1b0a5156d0945832351af4a 100644 (file)
@@ -797,7 +797,18 @@ static struct wm_table lpddr4_wm_table_rn = {
                },
        }
 };
+static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
+{
+       int i;
+
+       for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
+               if (clock_table->SocClocks[i].Vol == voltage)
+                       return clock_table->SocClocks[i].Freq;
+       }
 
+       ASSERT(0);
+       return 0;
+}
 static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
 {
        int i;
@@ -841,6 +852,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
                bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
                bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
                bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
+               bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table,
+                                                                       bw_params->clk_table.entries[i].voltage);
        }
 
        bw_params->vram_type = bios_info->memory_type;
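/* [Editor's note] find_socclk_for_voltage() mirrors the existing
 * find_dcfclk_for_voltage(): a linear scan of the SMU DPM table keyed on
 * voltage, so each bandwidth-table entry now carries a SOCCLK alongside its
 * DCFCLK for the same FCLK voltage level:
 *
 *	entries[i].voltage    = clock_table->FClocks[j].Vol;
 *	entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, entries[i].voltage);
 *	entries[i].socclk_mhz = find_socclk_for_voltage(clock_table, entries[i].voltage);
 *
 * The ASSERT(0) fallback fires only if the SMU reports an FCLK voltage with
 * no matching SOCCLK state, i.e. an inconsistent DPM table.
 */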
index 81ea5d3a1947bba1f0806cb456ad6fd699a86b99..577e7f97045efb31758e8940ef80a0afb5089a89 100644 (file)
@@ -432,6 +432,12 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
                        clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
 }
 
+static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
+{
+       struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+       return clk_mgr->smu_present;
+}
+
 static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
                                        struct dc_clocks *b)
 {
@@ -494,6 +500,7 @@ static struct clk_mgr_funcs dcn3_funcs = {
                .are_clock_states_equal = dcn3_are_clock_states_equal,
                .enable_pme_wa = dcn3_enable_pme_wa,
                .notify_link_rate_change = dcn30_notify_link_rate_change,
+               .is_smu_present = dcn3_is_smu_present,
 };
 
 static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
index 68942bbc7472847b7f13fddef0d3d1399441c93a..07774fa2c2cfa2c9f6277e7f2192cce87010c5b1 100644 (file)
@@ -113,10 +113,13 @@ int dcn301_smu_send_msg_with_param(
 
 int dcn301_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
 {
-       return dcn301_smu_send_msg_with_param(
-                       clk_mgr,
-                       VBIOSSMC_MSG_GetSmuVersion,
-                       0);
+       int smu_version = dcn301_smu_send_msg_with_param(clk_mgr,
+                                                        VBIOSSMC_MSG_GetSmuVersion,
+                                                        0);
+
+       DC_LOG_DEBUG("%s %x\n", __func__, smu_version);
+
+       return smu_version;
 }
 
 
@@ -124,6 +127,8 @@ int dcn301_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispc
 {
        int actual_dispclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dispclk_khz);
+
        /*  Unit of SMU msg parameter is Mhz */
        actual_dispclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
@@ -137,6 +142,8 @@ int dcn301_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
 {
        int actual_dprefclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s %d\n", __func__, clk_mgr->base.dprefclk_khz / 1000);
+
        actual_dprefclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDprefclkFreq,
@@ -151,6 +158,8 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
 {
        int actual_dcfclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dcfclk_khz);
+
        actual_dcfclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
@@ -163,6 +172,8 @@ int dcn301_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int r
 {
        int actual_min_ds_dcfclk_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_min_ds_dcfclk_khz);
+
        actual_min_ds_dcfclk_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
@@ -175,6 +186,8 @@ int dcn301_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_kh
 {
        int actual_dppclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dpp_khz);
+
        actual_dppclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDppclkFreq,
@@ -187,6 +200,8 @@ void dcn301_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr,
 {
        //TODO: Work with smu team to define optimization options.
 
+       DC_LOG_DEBUG("%s(%x)\n", __func__, idle_info);
+
        dcn301_smu_send_msg_with_param(
                clk_mgr,
                VBIOSSMC_MSG_SetDisplayIdleOptimizations,
@@ -202,6 +217,8 @@ void dcn301_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool
                idle_info.idle_info.phy_ref_clk_off = 1;
        }
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, enable);
+
        dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDisplayIdleOptimizations,
@@ -218,12 +235,16 @@ void dcn301_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
 
 void dcn301_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
 {
+       DC_LOG_DEBUG("%s(%x)\n", __func__, addr_high);
+
        dcn301_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
 }
 
 void dcn301_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
 {
+       DC_LOG_DEBUG("%s(%x)\n", __func__, addr_low);
+
        dcn301_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
 }
index aadb801447a7cb0c3cc1eef7d56ececa1546a214..c636b589d69db3c696cc7d6c347c808993122fb4 100644 (file)
@@ -32,9 +32,8 @@
 // For dcn20_update_clocks_update_dpp_dto
 #include "dcn20/dcn20_clk_mgr.h"
 
-
-
 #include "vg_clk_mgr.h"
+#include "dcn301_smu.h"
 #include "reg_helper.h"
 #include "core_types.h"
 #include "dm_helpers.h"
 
 /* Macros */
 
+#define TO_CLK_MGR_VGH(clk_mgr)\
+       container_of(clk_mgr, struct clk_mgr_vgh, base)
+
 #define REG(reg_name) \
        (CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
 
 /* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
-int vg_get_active_display_cnt_wa(
+static int vg_get_active_display_cnt_wa(
                struct dc *dc,
                struct dc_state *context)
 {
@@ -134,13 +136,13 @@ void vg_update_clocks(struct clk_mgr *clk_mgr_base,
                }
        }
 
-       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && !dc->debug.disable_min_fclk) {
                clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
                dcn301_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
        }
 
        if (should_set_clock(safe_to_lower,
-                       new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+                       new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && !dc->debug.disable_min_fclk) {
                clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
                dcn301_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
        }
@@ -377,7 +379,7 @@ void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
        s->dprefclk_khz = sb.dprefclk * 1000;
 }
 
-void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 
@@ -449,15 +451,16 @@ static void vg_build_watermark_ranges(struct clk_bw_params *bw_params, struct wa
 }
 
 
-void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+static void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-       struct watermarks *table = clk_mgr_base->smu_wm_set.wm_set;
+       struct clk_mgr_vgh *clk_mgr_vgh = TO_CLK_MGR_VGH(clk_mgr);
+       struct watermarks *table = clk_mgr_vgh->smu_wm_set.wm_set;
 
        if (!clk_mgr->smu_ver)
                return;
 
-       if (!table || clk_mgr_base->smu_wm_set.mc_address.quad_part == 0)
+       if (!table || clk_mgr_vgh->smu_wm_set.mc_address.quad_part == 0)
                return;
 
        memset(table, 0, sizeof(*table));
@@ -465,9 +468,9 @@ void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
        vg_build_watermark_ranges(clk_mgr_base->bw_params, table);
 
        dcn301_smu_set_dram_addr_high(clk_mgr,
-                       clk_mgr_base->smu_wm_set.mc_address.high_part);
+                       clk_mgr_vgh->smu_wm_set.mc_address.high_part);
        dcn301_smu_set_dram_addr_low(clk_mgr,
-                       clk_mgr_base->smu_wm_set.mc_address.low_part);
+                       clk_mgr_vgh->smu_wm_set.mc_address.low_part);
        dcn301_smu_transfer_wm_table_dram_2_smu(clk_mgr);
 }
 
@@ -625,7 +628,7 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
        return 0;
 }
 
-void vg_clk_mgr_helper_populate_bw_params(
+static void vg_clk_mgr_helper_populate_bw_params(
                struct clk_mgr_internal *clk_mgr,
                struct integrated_info *bios_info,
                const struct vg_dpm_clocks *clock_table)
@@ -703,7 +706,7 @@ static struct vg_dpm_clocks dummy_clocks = {
 
 static struct watermarks dummy_wms = { 0 };
 
-void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+static void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
                struct smu_dpm_clks *smu_dpm_clks)
 {
        struct vg_dpm_clocks *table = smu_dpm_clks->dpm_clks;
@@ -725,39 +728,39 @@ void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
 
 void vg_clk_mgr_construct(
                struct dc_context *ctx,
-               struct clk_mgr_internal *clk_mgr,
+               struct clk_mgr_vgh *clk_mgr,
                struct pp_smu_funcs *pp_smu,
                struct dccg *dccg)
 {
        struct smu_dpm_clks smu_dpm_clks = { 0 };
 
-       clk_mgr->base.ctx = ctx;
-       clk_mgr->base.funcs = &vg_funcs;
+       clk_mgr->base.base.ctx = ctx;
+       clk_mgr->base.base.funcs = &vg_funcs;
 
-       clk_mgr->pp_smu = pp_smu;
+       clk_mgr->base.pp_smu = pp_smu;
 
-       clk_mgr->dccg = dccg;
-       clk_mgr->dfs_bypass_disp_clk = 0;
+       clk_mgr->base.dccg = dccg;
+       clk_mgr->base.dfs_bypass_disp_clk = 0;
 
-       clk_mgr->dprefclk_ss_percentage = 0;
-       clk_mgr->dprefclk_ss_divider = 1000;
-       clk_mgr->ss_on_dprefclk = false;
-       clk_mgr->dfs_ref_freq_khz = 48000;
+       clk_mgr->base.dprefclk_ss_percentage = 0;
+       clk_mgr->base.dprefclk_ss_divider = 1000;
+       clk_mgr->base.ss_on_dprefclk = false;
+       clk_mgr->base.dfs_ref_freq_khz = 48000;
 
-       clk_mgr->base.smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
-                               clk_mgr->base.ctx,
+       clk_mgr->smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
+                               clk_mgr->base.base.ctx,
                                DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                sizeof(struct watermarks),
-                               &clk_mgr->base.smu_wm_set.mc_address.quad_part);
+                               &clk_mgr->smu_wm_set.mc_address.quad_part);
 
-       if (clk_mgr->base.smu_wm_set.wm_set == 0) {
-               clk_mgr->base.smu_wm_set.wm_set = &dummy_wms;
-               clk_mgr->base.smu_wm_set.mc_address.quad_part = 0;
+       if (clk_mgr->smu_wm_set.wm_set == 0) {
+               clk_mgr->smu_wm_set.wm_set = &dummy_wms;
+               clk_mgr->smu_wm_set.mc_address.quad_part = 0;
        }
-       ASSERT(clk_mgr->base.smu_wm_set.wm_set);
+       ASSERT(clk_mgr->smu_wm_set.wm_set);
 
        smu_dpm_clks.dpm_clks = (struct vg_dpm_clocks *)dm_helpers_allocate_gpu_mem(
-                               clk_mgr->base.ctx,
+                               clk_mgr->base.base.ctx,
                                DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                sizeof(struct vg_dpm_clocks),
                                &smu_dpm_clks.mc_address.quad_part);
@@ -771,21 +774,21 @@ void vg_clk_mgr_construct(
 
        if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
                vg_funcs.update_clocks = dcn2_update_clocks_fpga;
-               clk_mgr->base.dentist_vco_freq_khz = 3600000;
+               clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
        } else {
                struct clk_log_info log_info = {0};
 
-               clk_mgr->smu_ver = dcn301_smu_get_smu_version(clk_mgr);
+               clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);
 
-               if (clk_mgr->smu_ver)
-                       clk_mgr->smu_present = true;
+               if (clk_mgr->base.smu_ver)
+                       clk_mgr->base.smu_present = true;
 
                /* TODO: Check we get what we expect during bringup */
-               clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+               clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
 
                /* in case we don't get a value from the register, use default */
-               if (clk_mgr->base.dentist_vco_freq_khz == 0)
-                       clk_mgr->base.dentist_vco_freq_khz = 3600000;
+               if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
+                       clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
 
                if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
                        vg_bw_params.wm_table = lpddr5_wm_table;
@@ -793,36 +796,38 @@ void vg_clk_mgr_construct(
                        vg_bw_params.wm_table = ddr4_wm_table;
                }
                /* Saved clocks configured at boot for debug purposes */
-               vg_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+               vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
        }
 
-       clk_mgr->base.dprefclk_khz = 600000;
-       dce_clock_read_ss_info(clk_mgr);
+       clk_mgr->base.base.dprefclk_khz = 600000;
+       dce_clock_read_ss_info(&clk_mgr->base);
 
-       clk_mgr->base.bw_params = &vg_bw_params;
+       clk_mgr->base.base.bw_params = &vg_bw_params;
 
-       vg_get_dpm_table_from_smu(clk_mgr, &smu_dpm_clks);
+       vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
        if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
                vg_clk_mgr_helper_populate_bw_params(
-                               clk_mgr,
+                               &clk_mgr->base,
                                ctx->dc_bios->integrated_info,
                                smu_dpm_clks.dpm_clks);
        }
 
        if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
-               dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+               dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                smu_dpm_clks.dpm_clks);
 /*
-       if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver) {
+       if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) {
                 enable powerfeatures when displaycount goes to 0
                dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
        }
 */
 }
 
-void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
 {
-       if (clk_mgr->base.smu_wm_set.wm_set && clk_mgr->base.smu_wm_set.mc_address.quad_part != 0)
-               dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
-                               clk_mgr->base.smu_wm_set.wm_set);
+       struct clk_mgr_vgh *clk_mgr = TO_CLK_MGR_VGH(clk_mgr_int);
+
+       if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
+               dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+                               clk_mgr->smu_wm_set.wm_set);
 }
index b5115b3123a18f52a7c6ff2ffdda80e14f2a98f2..7255477307f1361e7b3f36edcfee636572a5fc57 100644 (file)
 
 #ifndef __VG_CLK_MGR_H__
 #define __VG_CLK_MGR_H__
+#include "clk_mgr_internal.h"
 
-int vg_get_active_display_cnt_wa(
-               struct dc *dc,
-               struct dc_state *context);
+struct watermarks;
 
-void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base);
+struct smu_watermark_set {
+       struct watermarks *wm_set;
+       union large_integer mc_address;
+};
+
+struct clk_mgr_vgh {
+       struct clk_mgr_internal base;
+       struct smu_watermark_set smu_wm_set;
+};
 
 void vg_clk_mgr_construct(struct dc_context *ctx,
-               struct clk_mgr_internal *clk_mgr,
+               struct clk_mgr_vgh *clk_mgr,
                struct pp_smu_funcs *pp_smu,
                struct dccg *dccg);
 
 void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
 
-#include "dcn301_smu.h"
-void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base);
-
-void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
-               struct smu_dpm_clks *smu_dpm_clks);
-
-void vg_clk_mgr_helper_populate_bw_params(
-               struct clk_mgr_internal *clk_mgr,
-               struct integrated_info *bios_info,
-               const struct vg_dpm_clocks *clock_table);
-
 #endif //__VG_CLK_MGR_H__
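/* [Editor's note] The header now exposes only the Van Gogh wrapper type and
 * its construct/destroy entry points; vg_get_active_display_cnt_wa(),
 * vg_enable_pme_wa(), vg_notify_wm_ranges(), vg_get_dpm_table_from_smu() and
 * vg_clk_mgr_helper_populate_bw_params() all became static in vg_clk_mgr.c
 * in the hunks above. Moving smu_wm_set out of clk_mgr_internal into struct
 * clk_mgr_vgh keeps this VGH-only watermark bookkeeping out of every other
 * ASIC's clock manager.
 */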
index 8e6c815b55d2dc590f5f27a579bb210679587816..8f0a13807d050eedf3b74a88a87bf2d677d627f2 100644 (file)
 #include "timing_generator.h"
 #include "abm.h"
 #include "virtual/virtual_link_encoder.h"
+#include "hubp.h"
 
 #include "link_hwss.h"
 #include "link_encoder.h"
+#include "link_enc_cfg.h"
 
 #include "dc_link_ddc.h"
 #include "dm_helpers.h"
@@ -304,7 +306,10 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
        int i = 0;
        bool ret = false;
 
-       stream->adjust = *adjust;
+       stream->adjust.v_total_max = adjust->v_total_max;
+       stream->adjust.v_total_mid = adjust->v_total_mid;
+       stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+       stream->adjust.v_total_min = adjust->v_total_min;
 
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -312,10 +317,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
-                                       adjust->v_total_min,
-                                       adjust->v_total_max,
-                                       adjust->v_total_mid,
-                                       adjust->v_total_mid_frame_num);
+                                       *adjust);
 
                        ret = true;
                }
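/* [Editor's note] dc->hwss.set_drr() now receives the whole adjust struct
 * rather than four scalars; a sketch of the assumed hook shape (the
 * hw_sequencer side is not part of this excerpt):
 *
 *	void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
 *			struct dc_crtc_timing_adjust adjust);
 *
 * so future adjust fields reach the sequencers without touching each call
 * site. The field-by-field copy into stream->adjust, replacing the old
 * whole-struct assignment, presumably avoids clobbering stream-side members
 * the caller does not supply.
 */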
@@ -870,6 +872,9 @@ static bool dc_construct(struct dc *dc,
        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;
 
+       /* Initialise DIG link encoder resource tracking variables. */
+       link_enc_cfg_init(dc, dc->current_state);
+
        return true;
 
 fail:
@@ -2091,6 +2096,10 @@ static enum surface_update_type check_update_surfaces_for_stream(
        if (stream_status == NULL || stream_status->plane_count != surface_count)
                overall_type = UPDATE_TYPE_FULL;
 
+       if (stream_update && stream_update->pending_test_pattern)
+               overall_type = UPDATE_TYPE_FULL;
+
        /* some stream updates require passive update */
        if (stream_update) {
                union stream_update_flags *su_flags = &stream_update->stream->update_flags;
@@ -2390,6 +2399,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
        if (update->dither_option)
                stream->dither_option = *update->dither_option;
 
+       if (update->pending_test_pattern)
+               stream->test_pattern = *update->pending_test_pattern;
        /* update current stream with writeback info */
        if (update->wb_update) {
                int i;
@@ -2485,6 +2496,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
                                }
                        }
 
+
                        /* Full fe update*/
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
@@ -2492,6 +2504,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
                        if (stream_update->dsc_config)
                                dp_update_dsc_config(pipe_ctx);
 
+                       if (stream_update->pending_test_pattern) {
+                               dc_link_dp_set_test_pattern(stream->link,
+                                       stream->test_pattern.type,
+                                       stream->test_pattern.color_space,
+                                       stream->test_pattern.p_link_settings,
+                                       stream->test_pattern.p_custom_pattern,
+                                       stream->test_pattern.cust_pattern_size);
+                       }
+
                        if (stream_update->dpms_off) {
                                if (*stream_update->dpms_off) {
                                        core_link_disable_stream(pipe_ctx);
@@ -2578,6 +2599,17 @@ static void commit_planes_for_stream(struct dc *dc,
                }
        }
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
+       if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+               struct pipe_ctx *mpcc_pipe;
+               struct pipe_ctx *odm_pipe;
+
+               for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+                       for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+                               odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
+       }
+#endif
+
        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
                        if (should_use_dmub_lock(stream->link)) {
@@ -2784,6 +2816,9 @@ static void commit_planes_for_stream(struct dc *dc,
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 
+               if (!pipe_ctx->plane_state)
+                       continue;
+
                if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
                                !pipe_ctx->stream || pipe_ctx->stream != stream ||
                                !pipe_ctx->plane_state->update_flags.bits.addr_update)
@@ -3225,6 +3260,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
        if (dc->debug.disable_idle_power_optimizations)
                return;
 
+       if (dc->clk_mgr->funcs->is_smu_present)
+               if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
+                       return;
+
        if (allow == dc->idle_optimizations_allowed)
                return;
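/* [Editor's note] dc_allow_idle_optimizations() now bails out early when the
 * clock manager reports no SMU, via the optional is_smu_present() hook added
 * earlier in this series. The nested if is the usual NULL-check pattern for
 * optional funcs-table entries and is equivalent to:
 *
 *	if (dc->clk_mgr->funcs->is_smu_present &&
 *	    !dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
 *		return;
 *
 * Older clock managers simply leave the hook unset.
 */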
 
index f9a33dc52c4555ad6ac992cfe5c159f4e0838460..29bc2874f6a7ac5eef98fba7c10823a65ef96d38 100644 (file)
@@ -92,11 +92,14 @@ static void dc_link_destruct(struct dc_link *link)
                link->panel_cntl->funcs->destroy(&link->panel_cntl);
 
        if (link->link_enc) {
-               /* Update link encoder tracking variables. These are used for the dynamic
-                * assignment of link encoders to streams.
+               /* Update link encoder resource tracking variables. These are used for
+                * the dynamic assignment of link encoders to streams. Virtual links
+                * are not assigned encoder resources on creation.
                 */
-               link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = NULL;
-               link->dc->res_pool->dig_link_enc_count--;
+               if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
+                       link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
+                       link->dc->res_pool->dig_link_enc_count--;
+               }
                link->link_enc->funcs->destroy(&link->link_enc);
        }
 
@@ -1407,6 +1410,8 @@ static bool dc_link_construct(struct dc_link *link,
        link->link_id =
                bios->funcs->get_connector_id(bios, init_params->connector_index);
 
+       link->ep_type = DISPLAY_ENDPOINT_PHY;
+
        DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
 
        if (bios->funcs->get_disp_connector_caps_info) {
@@ -1506,10 +1511,12 @@ static bool dc_link_construct(struct dc_link *link,
                (link->link_id.id == CONNECTOR_ID_EDP ||
                        link->link_id.id == CONNECTOR_ID_LVDS)) {
                panel_cntl_init_data.ctx = dc_ctx;
-               panel_cntl_init_data.inst = link->link_index;
+               panel_cntl_init_data.inst =
+                       panel_cntl_init_data.ctx->dc_edp_id_count;
                link->panel_cntl =
                        link->dc->res_pool->funcs->panel_cntl_create(
                                                                &panel_cntl_init_data);
+               panel_cntl_init_data.ctx->dc_edp_id_count++;
 
                if (link->panel_cntl == NULL) {
                        DC_ERROR("Failed to create link panel_cntl!\n");
@@ -1541,7 +1548,8 @@ static bool dc_link_construct(struct dc_link *link,
        /* Update link encoder tracking variables. These are used for the dynamic
         * assignment of link encoders to streams.
         */
-       link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = link->link_enc;
+       link->eng_id = link->link_enc->preferred_engine;
+       link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
        link->dc->res_pool->dig_link_enc_count++;
 
        link->link_enc_hw_inst = link->link_enc->transmitter;
@@ -2883,8 +2891,8 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
 static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
 {
        struct fixed31_32 peak_kbps;
-       uint32_t numerator;
-       uint32_t denominator;
+       uint32_t numerator = 0;
+       uint32_t denominator = 1;
 
        /*
         * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
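/* [Editor's note] res_pool->link_encoders[] is indexed by DIG instance, so
 * the engine id is normalized against ENGINE_ID_DIGA before use; e.g. for a
 * link whose preferred engine is ENGINE_ID_DIGB:
 *
 *	link->eng_id = link->link_enc->preferred_engine;	// DIGB
 *	link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA]
 *			= link->link_enc;			// slot 1
 *
 * Caching eng_id on the link lets dc_link_destruct() clear the right slot
 * later, and virtual connectors are now skipped on destruction since they
 * never claim an encoder slot. The numerator/denominator initializers in
 * get_pbn_from_bw_in_kbps() read as a defensive fix for a use-before-set
 * (and divide-by-zero) warning.
 */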
index 47e6c33f73cbad613b85014f864d703b32c62aa0..7d2e433c227563a9b2fc3885dc0a3279a908e7d9 100644 (file)
@@ -284,7 +284,7 @@ static uint8_t dc_dp_initialize_scrambling_data_symbols(
 
 static inline bool is_repeater(struct dc_link *link, uint32_t offset)
 {
-       return (link->lttpr_non_transparent_mode && offset != 0);
+       return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
 }
 
 static void dpcd_set_lt_pattern_and_lane_settings(
@@ -1072,7 +1072,7 @@ static enum link_training_result perform_clock_recovery_sequence(
                /* 3. wait receiver to lock-on*/
                wait_time_microsec = lt_settings->cr_pattern_time;
 
-               if (link->lttpr_non_transparent_mode)
+               if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
                        wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
 
                wait_for_training_aux_rd_interval(
@@ -1098,11 +1098,13 @@ static enum link_training_result perform_clock_recovery_sequence(
                if (is_max_vs_reached(lt_settings))
                        break;
 
-               /* 7. same voltage*/
-               /* Note: VS same for all lanes,
-               * so comparing first lane is sufficient*/
-               if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+               /* 7. same lane settings */
+               /* Note: settings are the same for all lanes,
+                * so comparing first lane is sufficient */
+               if ((lt_settings->lane_settings[0].VOLTAGE_SWING ==
                        req_settings.lane_settings[0].VOLTAGE_SWING)
+                       && (lt_settings->lane_settings[0].PRE_EMPHASIS ==
+                               req_settings.lane_settings[0].PRE_EMPHASIS))
                        retries_cr++;
                else
                        retries_cr = 0;
@@ -1324,7 +1326,17 @@ static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
        return 0; // invalid value
 }
 
-static void configure_lttpr_mode(struct dc_link *link)
+static void configure_lttpr_mode_transparent(struct dc_link *link)
+{
+       uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+       core_link_write_dpcd(link,
+                       DP_PHY_REPEATER_MODE,
+                       (uint8_t *)&repeater_mode,
+                       sizeof(repeater_mode));
+}
+
+static void configure_lttpr_mode_non_transparent(struct dc_link *link)
 {
        /* aux timeout is already set to extended */
        /* RESET/SET lttpr mode to enable non transparent mode */
@@ -1344,7 +1356,7 @@ static void configure_lttpr_mode(struct dc_link *link)
                link->dpcd_caps.lttpr_caps.mode = repeater_mode;
        }
 
-       if (link->lttpr_non_transparent_mode) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
 
                DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
 
@@ -1560,8 +1572,10 @@ enum link_training_result dc_link_dp_perform_link_training(
                        &lt_settings);
 
        /* Configure lttpr mode */
-       if (link->lttpr_non_transparent_mode)
-               configure_lttpr_mode(link);
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+               configure_lttpr_mode_non_transparent(link);
+       else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+               configure_lttpr_mode_transparent(link);
 
        if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
                start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
@@ -1576,7 +1590,7 @@ enum link_training_result dc_link_dp_perform_link_training(
 
        dp_set_fec_ready(link, fec_enable);
 
-       if (link->lttpr_non_transparent_mode) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
 
                /* 2. perform link training (set link training done
                 *  to false is done as well)
@@ -1633,6 +1647,42 @@ enum link_training_result dc_link_dp_perform_link_training(
        return status;
 }
 
+static enum dp_panel_mode try_enable_assr(struct dc_stream_state *stream)
+{
+       struct dc_link *link = stream->link;
+       enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+#endif
+
+       /* ASSR must be supported on the panel */
+       if (panel_mode == DP_PANEL_MODE_DEFAULT)
+               return panel_mode;
+
+       /* eDP or internal DP only */
+       if (link->connector_signal != SIGNAL_TYPE_EDP &&
+               !(link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+                link->is_internal_display))
+               return DP_PANEL_MODE_DEFAULT;
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (cp_psp && cp_psp->funcs.enable_assr) {
+               if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
+                       /* since eDP implies ASSR on, change panel
+                        * mode to disable ASSR
+                        */
+                       panel_mode = DP_PANEL_MODE_DEFAULT;
+               }
+       } else
+               panel_mode = DP_PANEL_MODE_DEFAULT;
+
+#else
+       /* turn off ASSR if the implementation is not compiled in */
+       panel_mode = DP_PANEL_MODE_DEFAULT;
+#endif
+       return panel_mode;
+}
+
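+/* [Editor's note] try_enable_assr() folds the old inline
+ * CONFIG_DRM_AMD_DC_HDCP block (removed further down) into one helper and
+ * widens it: ASSR is now attempted for internal DisplayPort panels as well
+ * as eDP, and the panel mode falls back to DP_PANEL_MODE_DEFAULT whenever
+ * the PSP declines the request or HDCP support is not compiled in.
+ */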
 bool perform_link_training_with_retries(
        const struct dc_link_settings *link_setting,
        bool skip_video_pattern,
@@ -1644,7 +1694,7 @@ bool perform_link_training_with_retries(
        uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->link;
-       enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+       enum dp_panel_mode panel_mode;
 
        /* We need to do this before the link training to ensure the idle pattern in SST
         * mode will be sent right after the link training
@@ -1669,32 +1719,25 @@ bool perform_link_training_with_retries(
                        msleep(delay_dp_power_up_in_ms);
                }
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-               if (panel_mode == DP_PANEL_MODE_EDP) {
-                       struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
-                       if (cp_psp && cp_psp->funcs.enable_assr) {
-                               if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
-                                       /* since eDP implies ASSR on, change panel
-                                        * mode to disable ASSR
-                                        */
-                                       panel_mode = DP_PANEL_MODE_DEFAULT;
-                               }
-                       } else
-                               panel_mode = DP_PANEL_MODE_DEFAULT;
-               }
-#endif
-
+               panel_mode = try_enable_assr(stream);
                dp_set_panel_mode(link, panel_mode);
+               DC_LOG_DETECTION_DP_CAPS("Link: %d ASSR enabled: %d\n",
+                               link->link_index,
+                               panel_mode != DP_PANEL_MODE_DEFAULT);
 
                if (link->aux_access_disabled) {
                        dc_link_dp_perform_link_training_skip_aux(link, link_setting);
                        return true;
-               } else if (dc_link_dp_perform_link_training(
-                               link,
-                               link_setting,
-                               skip_video_pattern) == LINK_TRAINING_SUCCESS)
-                       return true;
+               } else {
+                       enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
+
+                       status = dc_link_dp_perform_link_training(
+                                       link,
+                                       link_setting,
+                                       skip_video_pattern);
+                       if (status == LINK_TRAINING_SUCCESS)
+                               return true;
+               }
 
                /* latest link training still fail, skip delay and keep PHY on
                 */
@@ -1873,7 +1916,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
         * account for lttpr repeaters cap
         * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
         */
-       if (link->lttpr_non_transparent_mode) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
                if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
                        max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
 
@@ -2031,7 +2074,7 @@ bool dp_verify_link_cap(
        max_link_cap = get_max_link_cap(link);
 
        /* Grant extended timeout request */
-       if (link->lttpr_non_transparent_mode && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+       if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
                uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
 
                core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -2782,10 +2825,27 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
        enum dp_test_pattern test_pattern;
        enum dp_test_pattern_color_space test_pattern_color_space =
                        DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
+       enum dc_color_depth request_color_depth = COLOR_DEPTH_UNDEFINED;
+       struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+       struct pipe_ctx *pipe_ctx = NULL;
+       int i;
 
        memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
        memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
 
+       for (i = 0; i < MAX_PIPES; i++) {
+               if (pipes[i].stream == NULL)
+                       continue;
+
+               if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+                       pipe_ctx = &pipes[i];
+                       break;
+               }
+       }
+
+       if (pipe_ctx == NULL)
+               return;
+
        /* get link test pattern and pattern parameters */
        core_link_read_dpcd(
                        link,
@@ -2823,6 +2883,33 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
                                DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
                                DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
 
+       switch (dpcd_test_params.bits.BPC) {
+       case 0: // 6 bits
+               request_color_depth = COLOR_DEPTH_666;
+               break;
+       case 1: // 8 bits
+               request_color_depth = COLOR_DEPTH_888;
+               break;
+       case 2: // 10 bits
+               request_color_depth = COLOR_DEPTH_101010;
+               break;
+       case 3: // 12 bits
+               requestColorDepth = COLOR_DEPTH_121212;
+               break;
+       default:
+               break;
+       }
+
+       if (requestColorDepth != COLOR_DEPTH_UNDEFINED
+                       && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
+               DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
+                               __func__,
+                               pipe_ctx->stream->timing.display_color_depth,
+                               requestColorDepth);
+               pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
+               dp_update_dsc_config(pipe_ctx);
+       }
+
        dc_link_dp_set_test_pattern(
                        link,
                        test_pattern,
@@ -3369,6 +3456,9 @@ static bool retrieve_link_cap(struct dc_link *link)
        struct dp_sink_hw_fw_revision dp_hw_fw_revision;
        bool is_lttpr_present = false;
        const uint32_t post_oui_delay = 30; // 30ms
+       bool vbios_lttpr_enable = false;
+       bool vbios_lttpr_interop = false;
+       struct dc_bios *bios = link->dc->ctx->dc_bios;
 
        memset(dpcd_data, '\0', sizeof(dpcd_data));
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
@@ -3416,13 +3506,45 @@ static bool retrieve_link_cap(struct dc_link *link)
                return false;
        }
 
-       if (link->dc->caps.extended_aux_timeout_support &&
-                       link->dc->config.allow_lttpr_non_transparent_mode) {
+       /* Query BIOS to determine if LTTPR functionality is forced on by system */
+       if (bios->funcs->get_lttpr_caps) {
+               enum bp_result bp_query_result;
+               uint8_t is_vbios_lttpr_enable = 0;
+
+               bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
+               vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+       }
+
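+       /* Query BIOS to determine if LTTPR interoperability is enabled by system */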
+       if (bios->funcs->get_lttpr_interop) {
+               enum bp_result bp_query_result;
+               uint8_t is_vbios_interop_enabled = 0;
+
+               bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
+               vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+       }
+
+       /*
+        * Logic to determine LTTPR mode
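+        *
+        * The two VBIOS flags combine with driver config as follows:
+        *   enable=1, interop=1: force LTTPR_MODE_NON_TRANSPARENT
+        *   enable=0, interop=1: NON_TRANSPARENT if
+        *       allow_lttpr_non_transparent_mode is set, else TRANSPARENT
+        *   enable=0, interop=0: NON_TRANSPARENT only if that config is set
+        *       and extended aux timeout is supported, else NON_LTTPR
+        *   enable=1, interop=0: falls through to the NON_LTTPR default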
+        */
+       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+       if (vbios_lttpr_enable && vbios_lttpr_interop) {
+               link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+       } else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+               if (link->dc->config.allow_lttpr_non_transparent_mode)
+                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+               else
+                       link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+       } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+               if (!link->dc->config.allow_lttpr_non_transparent_mode
+                       || !link->dc->caps.extended_aux_timeout_support)
+                       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+               else
+                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+       }
+
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
                /* By reading LTTPR capability, RX assumes that we will enable
-                * LTTPR non transparent if LTTPR is present.
-                * Therefore, only query LTTPR capability when both LTTPR
-                * extended aux timeout and
-                * non transparent mode is supported by hardware
+                * LTTPR extended aux timeout if LTTPR is present.
                 */
                status = core_link_read_dpcd(
                                link,
@@ -3462,9 +3584,6 @@ static bool retrieve_link_cap(struct dc_link *link)
                        CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
        }
 
-       /* decide lttpr non transparent mode */
-       link->lttpr_non_transparent_mode = is_lttpr_present;
-
        if (!is_lttpr_present)
                dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
new file mode 100644 (file)
index 0000000..1361b87
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dc_link_dp.h"
+
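+/* Tracks the assignment of DIG link encoders to streams within a dc_state.
+ * Links with a fixed DIG mapping keep their encoder across state changes;
+ * links flagged is_dig_mapping_flexible are handed the first available
+ * encoder from the pool when their streams are assigned.
+ */
+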
+/* Check whether stream is supported by DIG link encoders. */
+static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
+{
+       bool is_dig_stream = false;
+       struct link_encoder *link_enc = NULL;
+       int i;
+
+       /* Loop over created link encoder objects. */
+       for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+               link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+
+               if (link_enc &&
+                               ((uint32_t)stream->signal & link_enc->output_signals)) {
+                       if (dc_is_dp_signal(stream->signal)) {
+                               /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
+                               struct dc_link_settings link_settings = {0};
+
+                               decide_link_settings(stream, &link_settings);
+                               if ((link_settings.link_rate >= LINK_RATE_LOW) &&
+                                               (link_settings.link_rate <= LINK_RATE_HIGH3)) {
+                                       is_dig_stream = true;
+                                       break;
+                               }
+                       } else {
+                               is_dig_stream = true;
+                               break;
+                       }
+               }
+       }
+
+       return is_dig_stream;
+}
+
+/* Update DIG link encoder resource tracking variables in dc_state. */
+static void update_link_enc_assignment(
+               struct dc_state *state,
+               struct dc_stream_state *stream,
+               enum engine_id eng_id,
+               bool add_enc)
+{
+       int eng_idx;
+       int stream_idx;
+       int i;
+
+       if (eng_id != ENGINE_ID_UNKNOWN) {
+               eng_idx = eng_id - ENGINE_ID_DIGA;
+               stream_idx = -1;
+
+               /* Index of stream in dc_state used to update correct entry in
+                * link_enc_assignments table.
+                */
+               for (i = 0; i < state->stream_count; i++) {
+                       if (stream == state->streams[i]) {
+                               stream_idx = i;
+                               break;
+                       }
+               }
+
+               /* Update link encoder assignments table, link encoder availability
+                * pool and link encoder assigned to stream in state.
+                * Add/remove encoder resource to/from stream.
+                */
+               if (stream_idx != -1) {
+                       if (add_enc) {
+                               state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
+                                       .valid = true,
+                                       .ep_id = (struct display_endpoint_id) {
+                                               .link_id = stream->link->link_id,
+                                               .ep_type = stream->link->ep_type},
+                                       .eng_id = eng_id};
+                               state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+                               stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
+                       } else {
+                               state->res_ctx.link_enc_assignments[stream_idx].valid = false;
+                               state->res_ctx.link_enc_avail[eng_idx] = eng_id;
+                               stream->link_enc = NULL;
+                       }
+               } else {
+                       dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
+               }
+       }
+}
+
+/* Return first available DIG link encoder. */
+static enum engine_id find_first_avail_link_enc(
+               struct dc_context *ctx,
+               struct dc_state *state)
+{
+       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+       int i;
+
+       for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+               eng_id = state->res_ctx.link_enc_avail[i];
+               if (eng_id != ENGINE_ID_UNKNOWN)
+                       break;
+       }
+
+       return eng_id;
+}
+
+/* Return stream using DIG link encoder resource. NULL if unused. */
+static struct dc_stream_state *get_stream_using_link_enc(
+               struct dc_state *state,
+               enum engine_id eng_id)
+{
+       struct dc_stream_state *stream = NULL;
+       int stream_idx = -1;
+       int i;
+
+       for (i = 0; i < state->stream_count; i++) {
+               struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+               if (assignment.valid && (assignment.eng_id == eng_id)) {
+                       stream_idx = i;
+                       break;
+               }
+       }
+
+       if (stream_idx != -1)
+               stream = state->streams[stream_idx];
+       else
+               dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id);
+
+       return stream;
+}
+
+void link_enc_cfg_init(
+               struct dc *dc,
+               struct dc_state *state)
+{
+       int i;
+
+       for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
+               if (dc->res_pool->link_encoders[i])
+                       state->res_ctx.link_enc_avail[i] = (enum engine_id) i;
+               else
+                       state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
+       }
+}
+
+void link_enc_cfg_link_encs_assign(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *streams[],
+               uint8_t stream_count)
+{
+       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+       int i;
+
+       /* Release DIG link encoder resources before running assignment algorithm. */
+       for (i = 0; i < stream_count; i++)
+               dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+
+       /* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */
+       for (i = 0; i < stream_count; i++) {
+               struct dc_stream_state *stream = streams[i];
+
+               /* Skip stream if not supported by DIG link encoder. */
+               if (!is_dig_link_enc_stream(stream))
+                       continue;
+
+               /* Physical endpoints have a fixed mapping to DIG link encoders. */
+               if (!stream->link->is_dig_mapping_flexible) {
+                       eng_id = stream->link->eng_id;
+                       update_link_enc_assignment(state, stream, eng_id, true);
+               }
+       }
+
+       /* (b) Then assign encoders to mappable endpoints. */
+       eng_id = ENGINE_ID_UNKNOWN;
+
+       for (i = 0; i < stream_count; i++) {
+               struct dc_stream_state *stream = streams[i];
+
+               /* Skip stream if not supported by DIG link encoder. */
+               if (!is_dig_link_enc_stream(stream))
+                       continue;
+
+               /* Mappable endpoints have a flexible mapping to DIG link encoders. */
+               if (stream->link->is_dig_mapping_flexible) {
+                       eng_id = find_first_avail_link_enc(stream->ctx, state);
+                       update_link_enc_assignment(state, stream, eng_id, true);
+               }
+       }
+}
+
+void link_enc_cfg_link_enc_unassign(
+               struct dc_state *state,
+               struct dc_stream_state *stream)
+{
+       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+
+       /* Only DIG link encoders. */
+       if (!is_dig_link_enc_stream(stream))
+               return;
+
+       if (stream->link_enc)
+               eng_id = stream->link_enc->preferred_engine;
+
+       update_link_enc_assignment(state, stream, eng_id, false);
+}
+
+bool link_enc_cfg_is_transmitter_mappable(
+               struct dc_state *state,
+               struct link_encoder *link_enc)
+{
+       bool is_mappable = false;
+       enum engine_id eng_id = link_enc->preferred_engine;
+       struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id);
+
+       if (stream)
+               is_mappable = stream->link->is_dig_mapping_flexible;
+
+       return is_mappable;
+}
+
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+               struct dc_state *state,
+               enum engine_id eng_id)
+{
+       struct dc_link *link = NULL;
+       int stream_idx = -1;
+       int i;
+
+       for (i = 0; i < state->stream_count; i++) {
+               struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+               if (assignment.valid && (assignment.eng_id == eng_id)) {
+                       stream_idx = i;
+                       break;
+               }
+       }
+
+       if (stream_idx != -1)
+               link = state->streams[stream_idx]->link;
+       else
+               dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
+
+       return link;
+}
+
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
+               struct dc_state *state,
+               struct dc_link *link)
+{
+       struct link_encoder *link_enc = NULL;
+       struct display_endpoint_id ep_id;
+       int stream_idx = -1;
+       int i;
+
+       ep_id = (struct display_endpoint_id) {
+               .link_id = link->link_id,
+               .ep_type = link->ep_type};
+
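+       /* An assignment matches only when both the graphics object id and the
+        * endpoint type agree with the link's endpoint id.
+        */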
+       for (i = 0; i < state->stream_count; i++) {
+               struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+               if (assignment.valid &&
+                               assignment.ep_id.link_id.id == ep_id.link_id.id &&
+                               assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id &&
+                               assignment.ep_id.link_id.type == ep_id.link_id.type &&
+                               assignment.ep_id.ep_type == ep_id.ep_type) {
+                       stream_idx = i;
+                       break;
+               }
+       }
+
+       if (stream_idx != -1)
+               link_enc = state->streams[stream_idx]->link_enc;
+       else
+               dm_output_to_console("%s: No link encoder used by link(%d).\n", __func__, link->link_index);
+
+       return link_enc;
+}
index 124ce215fca53c35c59a5f1acda8df8fa9e6c7fc..48ad1a8d4a74240ebb8464ab5d32e05d80f57378 100644 (file)
@@ -14,6 +14,7 @@
 #include "dpcd_defs.h"
 #include "dsc.h"
 #include "resource.h"
+#include "link_enc_cfg.h"
 #include "clk_mgr.h"
 
 static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
@@ -95,7 +96,7 @@ void dp_enable_link_phy(
        enum clock_source_id clock_source,
        const struct dc_link_settings *link_settings)
 {
-       struct link_encoder *link_enc = link->link_enc;
+       struct link_encoder *link_enc;
        struct dc  *dc = link->ctx->dc;
        struct dmcu *dmcu = dc->res_pool->dmcu;
 
@@ -105,6 +106,13 @@ void dp_enable_link_phy(
                        link->dc->res_pool->dp_clock_source;
        unsigned int i;
 
+       /* Link should always be assigned an encoder when enabling/disabling. */
+       if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+       else
+               link_enc = link->link_enc;
+       ASSERT(link_enc);
+
        if (link->connector_signal == SIGNAL_TYPE_EDP) {
                link->dc->hwss.edp_power_control(link, true);
                link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -227,6 +235,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
 {
        struct dc  *dc = link->ctx->dc;
        struct dmcu *dmcu = dc->res_pool->dmcu;
+       struct link_encoder *link_enc;
+
+       /* Link should always be assigned an encoder when enabling/disabling. */
+       if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+       else
+               link_enc = link->link_enc;
+       ASSERT(link_enc);
 
        if (!link->wa_flags.dp_keep_receiver_powered)
                dp_receiver_power_ctrl(link, false);
@@ -234,13 +250,13 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
        if (signal == SIGNAL_TYPE_EDP) {
                if (link->dc->hwss.edp_backlight_control)
                        link->dc->hwss.edp_backlight_control(link, false);
-               link->link_enc->funcs->disable_output(link->link_enc, signal);
+               link_enc->funcs->disable_output(link_enc, signal);
                link->dc->hwss.edp_power_control(link, false);
        } else {
                if (dmcu != NULL && dmcu->funcs->lock_phy)
                        dmcu->funcs->lock_phy(dmcu);
 
-               link->link_enc->funcs->disable_output(link->link_enc, signal);
+               link_enc->funcs->disable_output(link_enc, signal);
 
                if (dmcu != NULL && dmcu->funcs->unlock_phy)
                        dmcu->funcs->unlock_phy(dmcu);
@@ -302,7 +318,7 @@ void dp_set_hw_lane_settings(
 {
        struct link_encoder *encoder = link->link_enc;
 
-       if (link->lttpr_non_transparent_mode && !is_immediate_downstream(link, offset))
+       if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
                return;
 
        /* call Encoder to set lane settings */
index 3c91d16c271053e5506e901de65ce8eb144bf7b5..ac7a75887f952900179d65d3bf50d0f1e38312cc 100644 (file)
@@ -1930,6 +1930,9 @@ enum dc_status dc_remove_stream_from_ctx(
                                dc->res_pool,
                        del_pipe->stream_res.stream_enc,
                        false);
+       /* Release link encoder from stream in new dc_state. */
+       if (dc->res_pool->funcs->link_enc_unassign)
+               dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
 
        if (del_pipe->stream_res.audio)
                update_audio_usage(
@@ -2842,6 +2845,10 @@ bool pipe_need_reprogram(
        if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
                return true;
 
+       /* DIG link encoder resource assignment for stream changed. */
+       if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc)
+               return true;
+
        return false;
 }
 
index d163007e057c877c89c18890e0de23f4a1e5fb02..8108b82bac60ca37cd48d51ed417a9bb8fca158d 100644 (file)
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.127"
+#define DC_VER "3.2.130"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -460,6 +460,7 @@ struct dc_debug_options {
        enum pipe_split_policy pipe_split_policy;
        bool force_single_disp_pipe_split;
        bool voltage_align_fclk;
+       bool disable_min_fclk;
 
        bool disable_dfs_bypass;
        bool disable_dpp_power_gate;
index 86ab8f16f6213b57cf6329059838ed3e31969fe2..67abda44eb1f4b26d735b1407685eaae4a11baca 100644 (file)
@@ -150,6 +150,12 @@ struct dc_vbios_funcs {
                        struct dc_bios *dcb,
                        struct graphics_object_id object_id,
                        struct bp_disp_connector_caps_info *info);
+       enum bp_result (*get_lttpr_caps)(
+                       struct dc_bios *dcb,
+                       uint8_t *dce_caps);
+       enum bp_result (*get_lttpr_interop)(
+                       struct dc_bios *dcb,
+                       uint8_t *dce_caps);
 };
 
 struct bios_registers {
index c50ef5a909a635dbadff6d4d9192ccfb0e9df38e..b0013e674864d15877e7e2f2a1e9f2f9170dce78 100644 (file)
@@ -35,6 +35,13 @@ enum dc_link_fec_state {
        dc_link_fec_ready,
        dc_link_fec_enabled
 };
+
+enum lttpr_mode {
+       LTTPR_MODE_NON_LTTPR,           /* No repeater handling; train the sink directly. */
+       LTTPR_MODE_TRANSPARENT,         /* Repeaters pass link training through untouched. */
+       LTTPR_MODE_NON_TRANSPARENT,     /* Train each repeater hop explicitly. */
+};
+
 struct dc_link_status {
        bool link_active;
        struct dpcd_caps *dpcd_caps;
@@ -100,7 +107,7 @@ struct dc_link {
        bool link_state_valid;
        bool aux_access_disabled;
        bool sync_lt_in_progress;
-       bool lttpr_non_transparent_mode;
+       enum lttpr_mode lttpr_mode;
        bool is_internal_display;
 
        /* TODO: Rename. Flag an endpoint as having a programmable mapping to a
@@ -125,6 +132,11 @@ struct dc_link {
        uint8_t hpd_src;
 
        uint8_t link_enc_hw_inst;
+       /* DIG link encoder ID. Used as index in link encoder resource pool.
+        * For links with fixed mapping to DIG, this is not changed after dc_link
+        * object creation.
+        */
+       enum engine_id eng_id;
 
        bool test_pattern_enabled;
        union compliance_test_state compliance_test_state;
@@ -144,6 +156,11 @@ struct dc_link {
        struct panel_cntl *panel_cntl;
        struct link_encoder *link_enc;
        struct graphics_object_id link_id;
+       /* Endpoint type distinguishes display endpoints which do not have entries
+        * in the BIOS connector table from those that do. Helps when tracking link
+        * encoder to display endpoint assignments.
+        */
+       enum display_endpoint_type ep_type;
        union ddi_channel_mapping ddi_channel_mapping;
        struct connector_device_tag_info device_tag;
        struct dpcd_caps dpcd_caps;
index e747370fc43b8a8cdc8b285a4cf3cfc7262c677a..b0297f07f9de0705b9782ed72db847a7323b0fe6 100644 (file)
@@ -130,12 +130,24 @@ union stream_update_flags {
        uint32_t raw;
 };
 
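+/* Parameters of a DP test pattern request. A stream update may carry one in
+ * dc_stream_update.pending_test_pattern; the active request is stored in
+ * dc_stream_state.test_pattern.
+ */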
+struct test_pattern {
+       enum dp_test_pattern type;
+       enum dp_test_pattern_color_space color_space;
+       struct link_training_settings const *p_link_settings;
+       unsigned char const *p_custom_pattern;
+       unsigned int cust_pattern_size;
+};
+
 struct dc_stream_state {
        // sink is deprecated, new code should not reference
        // this pointer
        struct dc_sink *sink;
 
        struct dc_link *link;
+       /* For dynamic link encoder assignment, update the link encoder assigned to
+        * a stream via the volatile dc_state rather than the static dc_link.
+        */
+       struct link_encoder *link_enc;
        struct dc_panel_patch sink_patches;
        union display_content_support content_support;
        struct dc_crtc_timing timing;
@@ -227,6 +239,8 @@ struct dc_stream_state {
 
        uint32_t stream_id;
        bool is_dsc_enabled;
+
+       struct test_pattern test_pattern;
        union stream_update_flags update_flags;
 
        bool has_non_synchronizable_pclk;
@@ -264,6 +278,8 @@ struct dc_stream_update {
        struct dc_dsc_config *dsc_config;
        struct dc_transfer_func *func_shaper;
        struct dc_3dlut *lut3d_func;
+
+       struct test_pattern *pending_test_pattern;
 };
 
 bool dc_is_stream_unchanged(
index 80757a0ea7c6519e30f31e9846b50760e3df0112..432754eaf10b8815aa33d26d18689dcafe8a7b3b 100644 (file)
@@ -113,6 +113,7 @@ struct dc_context {
        struct gpio_service *gpio_service;
        uint32_t dc_sink_id_count;
        uint32_t dc_stream_id_count;
+       uint32_t dc_edp_id_count;
        uint64_t fbc_gpu_addr;
        struct dc_dmub_srv *dmub_srv;
 
@@ -687,7 +688,8 @@ enum dc_psr_state {
        PSR_STATE5,
        PSR_STATE5a,
        PSR_STATE5b,
-       PSR_STATE5c
+       PSR_STATE5c,
+       PSR_STATE_INVALID = 0xFF
 };
 
 struct psr_config {
@@ -934,4 +936,19 @@ enum dc_psr_version {
        DC_PSR_VERSION_UNSUPPORTED              = 0xFFFFFFFF,
 };
 
+/* Possible values of display_endpoint_id.endpoint */
+enum display_endpoint_type {
+       DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
+       DISPLAY_ENDPOINT_UNKNOWN = -1
+};
+
+/* Extends graphics_object_id with an additional member 'ep_type' for
+ * distinguishing between physical endpoints (with entries in BIOS connector table) and
+ * logical endpoints.
+ */
+struct display_endpoint_id {
+       struct graphics_object_id link_id;
+       enum display_endpoint_type ep_type;
+};
+
 #endif /* DC_TYPES_H_ */
index 4e87e70237e3da3cb551a1365d481d8f22564f15..874b132fe1d782f330353c628bf892da03f4618c 100644 (file)
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
        const struct dce_abm_shift *abm_shift,
        const struct dce_abm_mask *abm_mask)
 {
-       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
 
        if (abm_dce == NULL) {
                BREAK_TO_DEBUGGER();
index 4f864501e046c1abfd5760488c854f60f87c7eeb..8cd841320dedb17fb7c960b4a3939deb31e80495 100644 (file)
@@ -1133,7 +1133,7 @@ struct dmcu *dcn10_dmcu_create(
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask)
 {
-       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
 
        if (dmcu_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -1154,7 +1154,7 @@ struct dmcu *dcn20_dmcu_create(
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask)
 {
-       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
 
        if (dmcu_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -1175,7 +1175,7 @@ struct dmcu *dcn21_dmcu_create(
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask)
 {
-       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
 
        if (dmcu_dce == NULL) {
                BREAK_TO_DEBUGGER();
index 15ed09b7a452e650ef1e5a955dba14fa17ed0eec..28ff059aa7f3739947c594f478fc7f5dfa163e31 100644 (file)
@@ -80,19 +80,26 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
 static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
 {
        struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
-       uint32_t raw_state;
+       uint32_t raw_state = 0;
+       uint32_t retry_count = 0;
        enum dmub_status status;
 
-       // Send gpint command and wait for ack
-       status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
-
-       if (status == DMUB_STATUS_OK) {
-               // GPINT was executed, get response
-               dmub_srv_get_gpint_response(srv, &raw_state);
-               *state = convert_psr_state(raw_state);
-       } else
-               // Return invalid state when GPINT times out
-               *state = 0xFF;
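+       // Poll until the FW reports a valid PSR state, bounded at 1000 attempts.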
+       do {
+               // Send gpint command and wait for ack
+               status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+               if (status == DMUB_STATUS_OK) {
+                       // GPINT was executed, get response
+                       dmub_srv_get_gpint_response(srv, &raw_state);
+                       *state = convert_psr_state(raw_state);
+               } else {
+                       // Return invalid state when GPINT times out
+                       *state = PSR_STATE_INVALID;
+               }
+
+               // Assert if max retry hit
+               if (retry_count >= 1000)
+                       ASSERT(0);
+       } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
 }
 
 /*
index 804092f81f85807e4cc10729f69f78a6ff5fffb9..873c6f2d2cd97d83386ecf04262916b52a91acf4 100644 (file)
@@ -1846,8 +1846,7 @@ void dce110_set_safe_displaymarks(
  ******************************************************************************/
 
 static void set_drr(struct pipe_ctx **pipe_ctx,
-               int num_pipes, unsigned int vmin, unsigned int vmax,
-               unsigned int vmid, unsigned int vmid_frame_number)
+               int num_pipes, struct dc_crtc_timing_adjust adjust)
 {
        int i = 0;
        struct drr_params params = {0};
@@ -1856,8 +1855,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
        // Note DRR trigger events are generated regardless of whether num frames met.
        unsigned int num_frames = 2;
 
-       params.vertical_total_max = vmax;
-       params.vertical_total_min = vmin;
+       params.vertical_total_max = adjust.v_total_max;
+       params.vertical_total_min = adjust.v_total_min;
 
        /* TODO: If multiple pipes are to be supported, you need
         * some GSL stuff. Static screen triggers may be programmed differently
@@ -1867,7 +1866,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
                pipe_ctx[i]->stream_res.tg->funcs->set_drr(
                        pipe_ctx[i]->stream_res.tg, &params);
 
-               if (vmax != 0 && vmin != 0)
+               if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
                        pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
                                        pipe_ctx[i]->stream_res.tg,
                                        event_triggers, num_frames);
index 612450f992782ac9aae6602f568928ba89b26914..725d92e40cd30bf216580e1ff707cf6a14ae1aee 100644 (file)
@@ -526,7 +526,7 @@ static struct output_pixel_processor *dce80_opp_create(
        return &opp->base;
 }
 
-struct dce_aux *dce80_aux_engine_create(
+static struct dce_aux *dce80_aux_engine_create(
        struct dc_context *ctx,
        uint32_t inst)
 {
@@ -564,7 +564,7 @@ static const struct dce_i2c_mask i2c_masks = {
                I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
-struct dce_i2c_hw *dce80_i2c_hw_create(
+static struct dce_i2c_hw *dce80_i2c_hw_create(
        struct dc_context *ctx,
        uint32_t inst)
 {
@@ -580,7 +580,7 @@ struct dce_i2c_hw *dce80_i2c_hw_create(
        return dce_i2c_hw;
 }
 
-struct dce_i2c_sw *dce80_i2c_sw_create(
+static struct dce_i2c_sw *dce80_i2c_sw_create(
        struct dc_context *ctx)
 {
        struct dce_i2c_sw *dce_i2c_sw =
@@ -714,7 +714,7 @@ static const struct encoder_feature_support link_enc_feature = {
                .flags.bits.IS_TPS3_CAPABLE = true
 };
 
-struct link_encoder *dce80_link_encoder_create(
+static struct link_encoder *dce80_link_encoder_create(
        const struct encoder_init_data *enc_init_data)
 {
        struct dce110_link_encoder *enc110 =
@@ -753,7 +753,7 @@ static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_d
        return &panel_cntl->base;
 }
 
-struct clock_source *dce80_clock_source_create(
+static struct clock_source *dce80_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
        enum clock_source_id id,
@@ -777,7 +777,7 @@ struct clock_source *dce80_clock_source_create(
        return NULL;
 }
 
-void dce80_clock_source_destroy(struct clock_source **clk_src)
+static void dce80_clock_source_destroy(struct clock_source **clk_src)
 {
        kfree(TO_DCE110_CLK_SRC(*clk_src));
        *clk_src = NULL;
@@ -867,7 +867,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
        }
 }
 
-bool dce80_validate_bandwidth(
+static bool dce80_validate_bandwidth(
        struct dc *dc,
        struct dc_state *context,
        bool fast_validate)
@@ -912,7 +912,7 @@ static bool dce80_validate_surface_sets(
        return true;
 }
 
-enum dc_status dce80_validate_global(
+static enum dc_status dce80_validate_global(
                struct dc *dc,
                struct dc_state *context)
 {
index 9eb33eae0e817c3c7f25ead1eee3b44ca259fa2c..7c939c0a977b31afc904552aca8b018c32aa1840 100644 (file)
@@ -1893,7 +1893,7 @@ uint64_t reduceSizeAndFraction(
        num = *numerator;
        denom = *denominator;
        for (i = 0; i < count; i++) {
-               uint32_t num_reminder, denom_reminder;
+               uint32_t num_remainder, denom_remainder;
                uint64_t num_result, denom_result;
                if (checkUint32Bounary &&
                        num <= max_int32 && denom <= max_int32) {
@@ -1901,13 +1901,13 @@ uint64_t reduceSizeAndFraction(
                        break;
                }
                do {
-                       num_result = div_u64_rem(num, prime_numbers[i], &num_reminder);
-                       denom_result = div_u64_rem(denom, prime_numbers[i], &denom_reminder);
-                       if (num_reminder == 0 && denom_reminder == 0) {
+                       num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
+                       denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
+                       if (num_remainder == 0 && denom_remainder == 0) {
                                num = num_result;
                                denom = denom_result;
                        }
-               } while (num_reminder == 0 && denom_reminder == 0);
+               } while (num_remainder == 0 && denom_remainder == 0);
        }
        *numerator = num;
        *denominator = denom;
@@ -3271,8 +3271,7 @@ void dcn10_optimize_bandwidth(
 }
 
 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
-               int num_pipes, unsigned int vmin, unsigned int vmax,
-               unsigned int vmid, unsigned int vmid_frame_number)
+               int num_pipes, struct dc_crtc_timing_adjust adjust)
 {
        int i = 0;
        struct drr_params params = {0};
@@ -3281,11 +3280,10 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
        // Note DRR trigger events are generated regardless of whether num frames met.
        unsigned int num_frames = 2;
 
-       params.vertical_total_max = vmax;
-       params.vertical_total_min = vmin;
-       params.vertical_total_mid = vmid;
-       params.vertical_total_mid_frame_num = vmid_frame_number;
-
+       params.vertical_total_max = adjust.v_total_max;
+       params.vertical_total_min = adjust.v_total_min;
+       params.vertical_total_mid = adjust.v_total_mid;
+       params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
        /* TODO: If multiple pipes are to be supported, you need
         * some GSL stuff. Static screen triggers may be programmed differently
         * as well.
@@ -3293,7 +3291,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
        for (i = 0; i < num_pipes; i++) {
                pipe_ctx[i]->stream_res.tg->funcs->set_drr(
                        pipe_ctx[i]->stream_res.tg, &params);
-               if (vmax != 0 && vmin != 0)
+               if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
                        pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
                                        pipe_ctx[i]->stream_res.tg,
                                        event_triggers, num_frames);
@@ -3981,3 +3979,19 @@ void dcn10_get_clock(struct dc *dc,
                                dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
 
 }
+
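+/* Read back HUBP state and report, per pipe, whether DCC is enabled on the
+ * surface being scanned out; entries for blanked pipes are left untouched.
+ */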
+void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
+{
+       struct resource_pool *pool = dc->res_pool;
+       int i;
+
+       for (i = 0; i < pool->pipe_count; i++) {
+               struct hubp *hubp = pool->hubps[i];
+               struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+               hubp->funcs->hubp_read_state(hubp);
+
+               if (!s->blank_en)
+                       dcc_en_bits[i] = s->dcc_en ? 1 : 0;
+       }
+}
index e0800cd1cc02091fec2a3589e152bb8a9905ef59..37bec421fde8e34c70d32cb986246193c79ea666 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2016 Advanced Micro Devices, Inc.
+* Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -145,8 +145,7 @@ bool dcn10_dummy_display_power_gating(
                struct dc_bios *dcb,
                enum pipe_gating_control power_gating);
 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
-               int num_pipes, unsigned int vmin, unsigned int vmax,
-               unsigned int vmid, unsigned int vmid_frame_number);
+               int num_pipes, struct dc_crtc_timing_adjust adjust);
 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
                int num_pipes,
                struct crtc_position *position);
@@ -210,4 +209,6 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
 void dcn10_verify_allow_pstate_change_high(struct dc *dc);
 
+void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);
+
 #endif /* __DC_HWSS_DCN10_H__ */
index 254300b06b434b0332df1f93e5c56a18d17fcc58..d532c78ee76472087fcdb37475b9053b0305f1ed 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .set_backlight_level = dce110_set_backlight_level,
        .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
        .set_pipe = dce110_set_pipe,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
index 6138f4887de7fa597d385059e2801de54370c4bf..677663cc7bff1350d2f39941f972c7ccb4df0690 100644 (file)
@@ -131,6 +131,22 @@ void optc1_setup_vertical_interrupt2(
                        OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
 }
 
+/**
+ * optc1_set_vupdate_keepout() - program the OTG vupdate keepout window
+ * @optc: timing generator to program
+ * @params: keepout window start/end offsets and enable flag
+ *
+ * Vupdate keepout can be set to a window to block the update lock for that
+ * pipe from changing. The start offset begins at vstartup and runs for x
+ * clocks; the end offset starts from the end of vupdate and runs for x clocks.
+ */
+void optc1_set_vupdate_keepout(struct timing_generator *optc,
+                              struct vupdate_keepout_params *params)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+                 MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
+                 MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
+                 OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
+}
+
 /**
  * program_timing_generator   used by mode timing set
  * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
index 2529723beeb15c6bb2f47261bb3c6614a8f5b555..cabfe83fd634c52f5236c15d88337250a43fcf41 100644 (file)
@@ -194,6 +194,9 @@ struct dcn_optc_registers {
        SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
        SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
        SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+       SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
        SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
        SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
        SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
index d079f4e491e59ff92c19026fad4b4b5539f201a7..f962b905e79e6051e65b7460d6ac9907a0123678 100644 (file)
@@ -82,7 +82,7 @@ const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
        .meta_chunk_size_kbytes = 2,
        .writeback_chunk_size_kbytes = 2,
        .line_buffer_size_bits = 589824,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .IsLineBufferBppFixed = 0,
        .LineBufferFixedBpp = -1,
        .writeback_luma_buffer_size_kbytes = 12,
@@ -619,7 +619,6 @@ static const struct dc_debug_options debug_defaults_drv = {
                .recovery_enabled = false, /*enable this by default after testing.*/
                .max_downscale_src_width = 3840,
                .underflow_assert_delay_us = 0xFFFFFFFF,
-               .use_max_lb = true
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -631,7 +630,6 @@ static const struct dc_debug_options debug_defaults_diags = {
                .disable_pplib_clock_request = true,
                .disable_pplib_wm_range = true,
                .underflow_assert_delay_us = 0xFFFFFFFF,
-               .use_max_lb = true
 };
 
 static void dcn10_dpp_destroy(struct dpp **dpp)
index 62cc2651e00c1a1fea1876b09780053d453520a2..8774406120fc1c5ce8297c906880ca276f29af30 100644 (file)
@@ -112,7 +112,7 @@ struct dccg *dccg2_create(
        const struct dccg_shift *dccg_shift,
        const struct dccg_mask *dccg_mask)
 {
-       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
        struct dccg *base;
 
        if (dccg_dcn == NULL) {
index 7218ed9e43dc5c79d8b6a689f7387b7004820dab..b5bb613eed4d086a9977f13b15e0386bd7d5c284 100644 (file)
@@ -95,6 +95,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
 #endif
        .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
index fa013496e26baf8ae1aa817d6c9fcf389633420b..2f9bfaeaba8d612382fd990591aebe0e23ad6435 100644 (file)
@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
        } else {
                AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
 
-               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
-
+               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
        }
 
        //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
index ea7eaf7d755f291ef4b817f055783c229adfe5e3..3139d90017eed184d88ec51309ce613c34479295 100644 (file)
@@ -134,22 +134,6 @@ void optc2_set_gsl_window(struct timing_generator *optc,
                OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y);
 }
 
-/**
- * Vupdate keepout can be set to a window to block the update lock for that pipe from changing.
- * Start offset begins with vstartup and goes for x number of clocks,
- * end offset starts from end of vupdate to x number of clocks.
- */
-void optc2_set_vupdate_keepout(struct timing_generator *optc,
-                  const struct vupdate_keepout_params *params)
-{
-       struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-       REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
-               MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
-               MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
-               OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
-}
-
 void optc2_set_gsl_source_select(
                struct timing_generator *optc,
                int group_idx,
index e0a0a8a8e2c606214f72fc32cc6e8c2ec6bfc625..3dee2ec2a1bbd1665212947e4077fe59adf8deb5 100644 (file)
@@ -56,9 +56,6 @@
        SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
        SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
        SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
-       SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
-       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
-       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
        SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
        SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
        SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
index 2307b3517821da946f2de5a9bf356cad71825c53..f65a6904d09c826e04762f2bf3088fdb88131048 100644 (file)
@@ -112,7 +112,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
        .is_line_buffer_bpp_fixed = 0,
        .line_buffer_fixed_bpp = 0,
        .dcc_supported = true,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
@@ -180,7 +180,7 @@ static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
        .is_line_buffer_bpp_fixed = 0,
        .line_buffer_fixed_bpp = 0,
        .dcc_supported = true,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
@@ -1075,7 +1075,6 @@ static const struct dc_debug_options debug_defaults_drv = {
                .scl_reset_length10 = true,
                .sanity_checks = false,
                .underflow_assert_delay_us = 0xFFFFFFFF,
-               .use_max_lb = true
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -1092,7 +1091,6 @@ static const struct dc_debug_options debug_defaults_diags = {
                .scl_reset_length10 = true,
                .underflow_assert_delay_us = 0xFFFFFFFF,
                .enable_tri_buf = true,
-               .use_max_lb = true
 };
 
 void dcn20_dpp_destroy(struct dpp **dpp)
@@ -1106,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
        uint32_t inst)
 {
        struct dcn20_dpp *dpp =
-               kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
 
        if (!dpp)
                return NULL;
@@ -1124,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
        struct dc_context *ctx, uint32_t inst)
 {
        struct dcn10_ipp *ipp =
-               kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
 
        if (!ipp) {
                BREAK_TO_DEBUGGER();
@@ -1141,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
        struct dc_context *ctx, uint32_t inst)
 {
        struct dcn20_opp *opp =
-               kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
 
        if (!opp) {
                BREAK_TO_DEBUGGER();
@@ -1158,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
        uint32_t inst)
 {
        struct aux_engine_dce110 *aux_engine =
-               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
 
        if (!aux_engine)
                return NULL;
@@ -1196,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
        uint32_t inst)
 {
        struct dce_i2c_hw *dce_i2c_hw =
-               kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+               kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
 
        if (!dce_i2c_hw)
                return NULL;
@@ -1209,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
 struct mpc *dcn20_mpc_create(struct dc_context *ctx)
 {
        struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
-                                         GFP_KERNEL);
+                                         GFP_ATOMIC);
 
        if (!mpc20)
                return NULL;
@@ -1227,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
 {
        int i;
        struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
-                                         GFP_KERNEL);
+                                         GFP_ATOMIC);
 
        if (!hubbub)
                return NULL;
@@ -1255,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
                uint32_t instance)
 {
        struct optc *tgn10 =
-               kzalloc(sizeof(struct optc), GFP_KERNEL);
+               kzalloc(sizeof(struct optc), GFP_ATOMIC);
 
        if (!tgn10)
                return NULL;
@@ -1334,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
        bool dp_clk_src)
 {
        struct dce110_clk_src *clk_src =
-               kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+               kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
 
        if (!clk_src)
                return NULL;
@@ -1440,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
        struct dc_context *ctx, uint32_t inst)
 {
        struct dcn20_dsc *dsc =
-               kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
 
        if (!dsc) {
                BREAK_TO_DEBUGGER();
@@ -1574,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
        uint32_t inst)
 {
        struct dcn20_hubp *hubp2 =
-               kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
 
        if (!hubp2)
                return NULL;
@@ -2218,7 +2216,7 @@ int dcn20_populate_dml_pipes_from_context(
                        pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
 
                /* todo: default max for now, until there is logic reflecting this in dc*/
-               pipes[pipe_cnt].dout.output_bpc = 12;
+               pipes[pipe_cnt].dout.dsc_input_bpc = 12;
                /*fill up the audio sample rate (unit in kHz)*/
                get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
                pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
@@ -3396,7 +3394,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
 
 static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
 {
-       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
 
        if (!pp_smu)
                return pp_smu;
@@ -4042,7 +4040,7 @@ struct resource_pool *dcn20_create_resource_pool(
                struct dc *dc)
 {
        struct dcn20_resource_pool *pool =
-               kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
 
        if (!pool)
                return NULL;
index 074e2713257f179dbc8cc642665e3d50765db686..4f20a85ff39689fa52baa4f8e31de6279853ba38 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 #endif
        .is_abm_supported = dcn21_is_abm_supported,
        .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
index e62f931fc269fcfb7e9c04798a17924fd4bd32c3..8e3f1d0b4cc3ca995f867643d55fc5485ba9f337 100644 (file)
@@ -55,7 +55,6 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
 #include "virtual/virtual_stream_encoder.h"
-#include "dce110/dce110_resource.h"
 #include "dml/display_mode_vba.h"
 #include "dcn20/dcn20_dccg.h"
 #include "dcn21/dcn21_dccg.h"
@@ -115,7 +114,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
        .is_line_buffer_bpp_fixed = 0,
        .line_buffer_fixed_bpp = 0,
        .dcc_supported = true,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
index 705fbfc375029f7c1d089f0dca60b4da9b27f6fc..8a32772d4e91af4b673fe0953ea4d9786d2f58c9 100644 (file)
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+       HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
index c4c14e9c1309c2653bbbab16e0843f8fecd0dd24..bf7fa98b39eb2e3426ab7b3b58cbd10a26387004 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .hardware_release = dcn30_hardware_release,
        .set_pipe = dcn21_set_pipe,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
index 263c2986682d2521b9450093e8e72d706d6b6d11..4a5fa23d8e7b06911d13ed22c6e197d74df2b8f8 100644 (file)
@@ -120,7 +120,7 @@ struct _vcs_dpi_ip_params_st dcn3_0_ip = {
        .dcc_supported = true,
        .writeback_interface_buffer_size_kbytes = 90,
        .writeback_line_buffer_buffer_size = 0,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
index bdad72140cbcec03fe2b61a6b544ec150904d4f4..0d90523c7cdcf6814ff8e23c848dc29f9ef360eb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn301_private_funcs = {
index 622a5bf9737f713240f14641cce2c090d372c47d..5b54b7fc5105d05d75d4f04a1443923fbbbbc637 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -116,7 +116,7 @@ struct _vcs_dpi_ip_params_st dcn3_01_ip = {
        .dcc_supported = true,
        .writeback_interface_buffer_size_kbytes = 90,
        .writeback_line_buffer_buffer_size = 656640,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
index 0723e29fd42e305a5b0dcf2f05a230d11ba61f43..fc2dea243d1ba06b07cae3b418a90709a7be8049 100644 (file)
@@ -101,7 +101,7 @@ struct _vcs_dpi_ip_params_st dcn3_02_ip = {
                .dcc_supported = true,
                .writeback_interface_buffer_size_kbytes = 90,
                .writeback_line_buffer_buffer_size = 0,
-               .max_line_buffer_lines = 32,
+               .max_line_buffer_lines = 12,
                .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
                .writeback_chroma_buffer_size_kbytes = 8,
                .writeback_chroma_line_buffer_width_pixels = 4,
@@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
 
                .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
                .num_states = 1,
-               .sr_exit_time_us = 12,
+               .sr_exit_time_us = 15.5,
                .sr_enter_plus_exit_time_us = 20,
                .urgent_latency_us = 4.0,
                .urgent_latency_pixel_data_only_us = 4.0,
index 72423dc425dc015b155a67f0a6bc3af4691e1d88..799bae229e6797e6488eac570da53bb3b18ce3ab 100644 (file)
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
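
This rework of handle_det_buf_split (repeated below for each DML version)
guards the swath-height math: the old expression
dml_log2(blk256_*) - req128_* underflows when the 256-byte block dimension
is 1 (log2 == 0) and the 128-byte request flag is set, which then feeds an
invalid shift count into 1 << log2_swath_height_*. A minimal standalone
sketch of the guarded computation; ilog2u() is an illustrative stand-in for
dml_log2(), and the inputs are hypothetical:

#include <assert.h>

static unsigned int ilog2u(unsigned int v)   /* stand-in for dml_log2() */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned int swath_height(unsigned int blk256_dim, int req128)
{
	unsigned int log2_h = 0;

	if (blk256_dim > 0)
		log2_h = ilog2u(blk256_dim);
	if (req128 && log2_h > 0)     /* the new guard: never drop below 0 */
		log2_h -= 1;
	return 1u << log2_h;
}

int main(void)
{
	assert(swath_height(4, 1) == 2);   /* old code: 1 << (2 - 1), same */
	assert(swath_height(1, 1) == 1);   /* old code: 1 << (0 - 1), invalid */
	return 0;
}
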
 
index 9c78446c3a9d8b2a3a195674b86015d047f65b6a..6a6d5970d1d58302793a6a145e768cf308526b8e 100644 (file)
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index edd41d3582910385ca17c5089f969c10ff157fee..dc1c81a6e377161b0155db62652ad7e44b0f4096 100644 (file)
@@ -277,13 +277,31 @@ static void handle_det_buf_split(
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index 0f14f205ebe593c3ab3a25d1091de8395fba7a71..04601a767a8f1f637d41872be3d198ec5b5776f4 100644 (file)
@@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index 098d6433f7f39ed83b4fe304f46601c5043392ad..1f7b6ddf30203e9688168155e35027b3bb23d903 100644 (file)
@@ -226,7 +226,7 @@ void dml_log_pipe_params(
                dml_print("DML PARAMS: PIPE [%d] DISPLAY OUTPUT PARAMS:\n", i);
                dml_print("DML PARAMS:     output_type                = %d\n", dout->output_type);
                dml_print("DML PARAMS:     output_format              = %d\n", dout->output_format);
-               dml_print("DML PARAMS:     output_bpc                 = %d\n", dout->output_bpc);
+               dml_print("DML PARAMS:     dsc_input_bpc              = %d\n", dout->dsc_input_bpc);
                dml_print("DML PARAMS:     output_bpp                 = %3.4f\n", dout->output_bpp);
                dml_print("DML PARAMS:     dp_lanes                   = %d\n", dout->dp_lanes);
                dml_print("DML PARAMS:     dsc_enable                 = %d\n", dout->dsc_enable);
index 0c5128187e0896868134d3436e41fc9256effc0a..2ece3690bfa31a31a183424c96ee22b8482ede91 100644 (file)
@@ -164,7 +164,7 @@ struct _vcs_dpi_ip_params_st {
        double writeback_max_vscl_ratio;
        double writeback_min_hscl_ratio;
        double writeback_min_vscl_ratio;
-       double maximum_dsc_bits_per_component;
+       unsigned int maximum_dsc_bits_per_component;
        unsigned int writeback_max_hscl_taps;
        unsigned int writeback_max_vscl_taps;
        unsigned int writeback_line_buffer_luma_buffer_size;
@@ -292,10 +292,10 @@ struct writeback_st {
 struct _vcs_dpi_display_output_params_st {
        int dp_lanes;
        double output_bpp;
+       unsigned int dsc_input_bpc;
        int dsc_enable;
        int wb_enable;
        int num_active_wb;
-       int output_bpc;
        int output_type;
        int is_virtual;
        int output_format;
index 94036a9612cf8f11b4c08d4991e8683e60ae7ecd..2a967458065be006ca327f9e1f4a8264453480fa 100644 (file)
@@ -471,7 +471,13 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
                mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
                mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
                                dout->dsc_slices;
-               mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpc;
+               if (!dout->dsc_input_bpc) {
+                       mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+                               ip->maximum_dsc_bits_per_component;
+               } else {
+                       mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+                               dout->dsc_input_bpc;
+               }
                mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
                mode_lib->vba.ActiveWritebacksPerPlane[mode_lib->vba.NumberOfActivePlanes] =
                                dout->num_active_wb;
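
The fallback above treats a zero dsc_input_bpc as "unspecified" and
substitutes the IP block's maximum_dsc_bits_per_component. Functionally it
reduces to a one-line selection; a sketch only, with a hypothetical helper
name:

/* Hypothetical condensation of the fallback above: 0 means "unspecified". */
static unsigned int dsc_input_bpc_or_default(unsigned int requested,
					     unsigned int ip_max_bpc)
{
	return requested ? requested : ip_max_bpc;
}
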
index 4c3e9cc30167953a1b29f38d100589d4e65ef6ec..414da64f57340a4a65157ebefaaf533abb213eb9 100644 (file)
@@ -344,13 +344,31 @@ static void handle_det_buf_split(
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index eb1a19bf0d81ff959af1615fb5b581cae77954f7..81b92f20d5b6cd6c9cc6c42abe9e09fa2e87649a 100644 (file)
@@ -118,6 +118,27 @@ struct resource_funcs {
                display_e2e_pipe_params_st *pipes,
                bool fast_validate);
 
+       /*
+        * Algorithm for assigning available link encoders to links.
+        *
+        * Update link_enc_assignments table and link_enc_avail list accordingly in
+        * struct resource_context.
+        */
+       void (*link_encs_assign)(
+                       struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_stream_state *streams[],
+                       uint8_t stream_count);
+       /*
+        * Unassign a link encoder from a stream.
+        *
+        * Update link_enc_assignments table and link_enc_avail list accordingly in
+        * struct resource_context.
+        */
+       void (*link_enc_unassign)(
+                       struct dc_state *state,
+                       struct dc_stream_state *stream);
+
        enum dc_status (*validate_global)(
                struct dc *dc,
                struct dc_state *context);
@@ -358,6 +379,12 @@ struct resource_context {
        uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
        uint8_t dp_clock_source_ref_count;
        bool is_dsc_acquired[MAX_PIPES];
+       /* A table/array of encoder-to-link assignments. One entry per stream.
+        * Indexed by stream index in dc_state.
+        */
+       struct link_enc_assignment link_enc_assignments[MAX_PIPES];
+       /* List of available link encoders. Uses engine ID as encoder identifier. */
+       enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        bool is_mpc_3dlut_acquired[MAX_PIPES];
 #endif
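
With one assignment entry per stream (indexed by stream index in dc_state)
and an availability list keyed by engine ID, finding the encoder that serves
a given endpoint is a linear scan over link_enc_assignments. A sketch with
simplified stand-in types rather than the real dc structures (the entry
layout mirrors struct link_enc_assignment introduced in link_encoder.h
below):

#include <stdbool.h>
#include <stddef.h>

#define MAX_PIPES 6

enum engine_id { ENGINE_ID_UNKNOWN = -1, ENGINE_ID_DIGA, ENGINE_ID_DIGB };

struct display_endpoint_id { int link_id; };   /* simplified stand-in */

struct link_enc_assignment {
	bool valid;
	struct display_endpoint_id ep_id;
	enum engine_id eng_id;
};

/* Illustrative scan: return the engine assigned to an endpoint, if any. */
static enum engine_id find_assigned_eng(const struct link_enc_assignment *tbl,
					struct display_endpoint_id ep)
{
	for (size_t i = 0; i < MAX_PIPES; i++)
		if (tbl[i].valid && tbl[i].ep_id.link_id == ep.link_id)
			return tbl[i].eng_id;
	return ENGINE_ID_UNKNOWN;
}
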
index 3a29f379d0c858716847bb6034a99bc6e531465d..5dc8d02b40c3c18b5a813f1775d2c2029d5a7253 100644 (file)
@@ -262,14 +262,9 @@ struct clk_mgr_funcs {
 
        /* Get current memclk states from PMFW, update relevant structures */
        void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr);
-};
-
-struct dpm_clocks;
-struct wartermarks;
 
-struct smu_watermark_set {
-       struct watermarks *wm_set;
-       union large_integer mc_address;
+       /* Get SMU present */
+       bool (*is_smu_present)(struct clk_mgr *clk_mgr);
 };
 
 struct clk_mgr {
@@ -283,7 +278,6 @@ struct clk_mgr {
        struct clk_state_registers_and_bypass boot_snapshot;
        struct clk_bw_params *bw_params;
        struct pp_smu_wm_range_sets ranges;
-       struct smu_watermark_set smu_wm_set;
 };
 
 /* forward declarations */
index 346dcd87dc10651b28bab509b314f21cb89f4c8e..80e1a32bc63dac04216072d047bec4b805975eca 100644 (file)
@@ -29,6 +29,7 @@
 #include "mem_input.h"
 
 #define OPP_ID_INVALID 0xf
+#define MAX_TTU 0xffffff
 
 
 enum cursor_pitch {
index 7f5acd8fb91805147aa9bf74205f024e1d6e11e2..80bc995006458d5756622d90b7758ddb0b23eb35 100644 (file)
@@ -187,4 +187,17 @@ struct link_encoder_funcs {
                struct link_encoder *enc);
 };
 
+/*
+ * Used to track assignments of links (display endpoints) to link encoders.
+ *
+ * Entry in link_enc_assignments table in struct resource_context.
+ * An entry is marked valid only once an encoder has been assigned to a link,
+ * and is invalidated once unassigned.
+ * Uses the engine ID as the identifier, since the PHY ID is not relevant for
+ * USB4 DPIA endpoints.
+ */
+struct link_enc_assignment {
+       bool valid;
+       struct display_endpoint_id ep_id;
+       enum engine_id eng_id;
+};
+
 #endif /* LINK_ENCODER_H_ */
index 2fedfcac6705076307eba58d942f5ec6b47ebb90..1d5853c95448d553b2128fe42df9a89ef5e08985 100644 (file)
@@ -118,8 +118,7 @@ struct hw_sequencer_funcs {
                        struct pipe_ctx *pipe_ctx,
                        enum vline_select vline);
        void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
-                       unsigned int vmin, unsigned int vmax,
-                       unsigned int vmid, unsigned int vmid_frame_number);
+                       struct dc_crtc_timing_adjust adjust);
        void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
                        int num_pipes,
                        const struct dc_static_screen_params *events);
@@ -218,6 +217,8 @@ struct hw_sequencer_funcs {
 
        void (*set_pipe)(struct pipe_ctx *pipe_ctx);
 
+       void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);
+
        /* Idle Optimization Related */
        bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
new file mode 100644 (file)
index 0000000..7d36e55
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INC_LINK_ENC_CFG_H_
+#define DC_INC_LINK_ENC_CFG_H_
+
+/* This module implements functionality for dynamically assigning DIG link
+ * encoder resources to display endpoints (links).
+ */
+
+#include "core_types.h"
+
+/*
+ * Initialise link encoder resource tracking.
+ */
+void link_enc_cfg_init(
+               struct dc *dc,
+               struct dc_state *state);
+
+/*
+ * Algorithm for assigning available DIG link encoders to streams.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ *
+ * Loop over all streams twice:
+ * a) First assign encoders to unmappable endpoints.
+ * b) Then assign encoders to mappable endpoints.
+ */
+void link_enc_cfg_link_encs_assign(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *streams[],
+               uint8_t stream_count);
+
+/*
+ * Unassign a link encoder from a stream.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ */
+void link_enc_cfg_link_enc_unassign(
+               struct dc_state *state,
+               struct dc_stream_state *stream);
+
+/*
+ * Check whether the transmitter driven by a link encoder is a mappable
+ * endpoint.
+ */
+bool link_enc_cfg_is_transmitter_mappable(
+               struct dc_state *state,
+               struct link_encoder *link_enc);
+
+/* Return link using DIG link encoder resource. NULL if unused. */
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+               struct dc_state *state,
+               enum engine_id eng_id);
+
+/* Return DIG link encoder used by link. NULL if unused. */
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
+               struct dc_state *state,
+               struct dc_link *link);
+
+#endif /* DC_INC_LINK_ENC_CFG_H_ */
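
The header documents a two-pass policy: unmappable endpoints (those tied to
a fixed PHY) are served first, then mappable ones (e.g. USB4 DPIA) take
whatever encoders remain. A toy model of that ordering follows; the types
and helper here are illustrative, not the dc implementation in
link_enc_cfg.c:

#include <stdbool.h>

#define NUM_ENCODERS 4

struct stream { bool mappable; int enc; };   /* toy model, not dc types */

static bool enc_used[NUM_ENCODERS];

static int next_available_encoder(void)
{
	for (int e = 0; e < NUM_ENCODERS; e++)
		if (!enc_used[e]) {
			enc_used[e] = true;
			return e;
		}
	return -1;
}

/* Two-pass policy from the header comment above (toy model):
 * pass 1 pins encoders for endpoints that cannot be remapped,
 * pass 2 hands the remainder to mappable endpoints.
 */
static void assign_link_encs(struct stream *s, int count)
{
	for (int i = 0; i < count; i++)
		if (!s[i].mappable) {
			s[i].enc = i;   /* dedicated encoder, by index here */
			enc_used[i] = true;
		}

	for (int i = 0; i < count; i++)
		if (s[i].mappable)
			s[i].enc = next_available_encoder();
}
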
index 6ee9dd833b85c2a5a58593bd55e7043f00f31aa7..1a5be2792055e16b8f9220c94b3946c2b87d5c2a 100644 (file)
@@ -187,6 +187,10 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
        .ack = NULL
 };
 
+static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
+       .set = NULL,
+       .ack = NULL
+};
 
 static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .set = NULL,
@@ -205,6 +209,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                        mm ## block ## id ## _ ## reg_name
 
+#define SRI_DMUB(reg_name)\
+       BASE(mm ## reg_name ## _BASE_IDX) + \
+                       mm ## reg_name
 
 #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
        .enable_reg = SRI(reg1, block, reg_num),\
@@ -220,7 +227,19 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .ack_value = \
                block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 
-
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+       .enable_reg = SRI_DMUB(reg1),\
+       .enable_mask = \
+               reg1 ## __ ## mask1 ## _MASK,\
+       .enable_value = {\
+               reg1 ## __ ## mask1 ## _MASK,\
+               ~reg1 ## __ ## mask1 ## _MASK \
+       },\
+       .ack_reg = SRI_DMUB(reg2),\
+       .ack_mask = \
+               reg2 ## __ ## mask2 ## _MASK,\
+       .ack_value = \
+               reg2 ## __ ## mask2 ## _MASK \
 
 #define hpd_int_entry(reg_num)\
        [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -282,6 +301,13 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                .funcs = &vline0_irq_info_funcs\
        }
 
+#define dmub_trace_int_entry()\
+       [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+               IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+                       DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+               .funcs = &dmub_trace_irq_info_funcs\
+       }
+
 #define dummy_irq_entry() \
        {\
                .funcs = &dummy_irq_info_funcs\
@@ -400,6 +426,7 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
        vline0_int_entry(3),
        vline0_int_entry(4),
        vline0_int_entry(5),
+       dmub_trace_int_entry(),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn21 = {
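
For readers tracing the token pasting: substituting reg1 =
DMCUB_INTERRUPT_ENABLE, mask1 = DMCUB_OUTBOX0_READY_INT_EN, reg2 =
DMCUB_INTERRUPT_ACK and mask2 = DMCUB_OUTBOX0_READY_INT_ACK, the new
dmub_trace_int_entry() expands to roughly the initializer below (the
BASE(mm*_BASE_IDX) offset that SRI_DMUB() adds to each register is elided
for brevity):

/* Approximate expansion -- illustration only */
[DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {
	.enable_reg   = mmDMCUB_INTERRUPT_ENABLE,
	.enable_mask  = DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK,
	.enable_value = {
		DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK,
		~DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK
	},
	.ack_reg      = mmDMCUB_INTERRUPT_ACK,
	.ack_mask     = DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK,
	.ack_value    = DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK,
	.funcs        = &dmub_trace_irq_info_funcs
},
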
index 4ec6f6ad8c4837765b0f9d84fb0bc72ef26214f0..914ce2ce1c2fcd0aa2e05a88058fd5192563c280 100644 (file)
@@ -215,6 +215,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                        mm ## block ## id ## _ ## reg_name
 
+#define SRI_DMUB(reg_name)\
+       BASE(mm ## reg_name ## _BASE_IDX) + \
+                       mm ## reg_name
 
 #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
        .enable_reg = SRI(reg1, block, reg_num),\
@@ -230,7 +233,19 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .ack_value = \
                block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 
-
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+       .enable_reg = SRI_DMUB(reg1),\
+       .enable_mask = \
+               reg1 ## __ ## mask1 ## _MASK,\
+       .enable_value = {\
+               reg1 ## __ ## mask1 ## _MASK,\
+               ~reg1 ## __ ## mask1 ## _MASK \
+       },\
+       .ack_reg = SRI_DMUB(reg2),\
+       .ack_mask = \
+               reg2 ## __ ## mask2 ## _MASK,\
+       .ack_value = \
+               reg2 ## __ ## mask2 ## _MASK \
 
 #define hpd_int_entry(reg_num)\
        [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -284,6 +299,13 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                .funcs = &vline0_irq_info_funcs\
        }
 
+#define dmub_trace_int_entry()\
+       [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+               IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+                       DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+               .funcs = &dmub_trace_irq_info_funcs\
+       }
+
 #define dummy_irq_entry() \
        {\
                .funcs = &dummy_irq_info_funcs\
@@ -398,6 +420,7 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
        vline0_int_entry(3),
        vline0_int_entry(4),
        vline0_int_entry(5),
+       dmub_trace_int_entry(),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn30 = {
index 2313a5664f44ba93230a35e28a8801aa784e7f8d..40fd34fb1d5e2bfb76a90570462e9a030e2506e1 100644 (file)
@@ -50,6 +50,8 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
                return DC_IRQ_SOURCE_VBLANK5;
        case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
                return DC_IRQ_SOURCE_VBLANK6;
+       case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT:
+               return DC_IRQ_SOURCE_DMCUB_OUTBOX0;
        case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
                return DC_IRQ_SOURCE_DC1_VLINE0;
        case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
@@ -166,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
                .ack = NULL
 };
 
+static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
+       .set = NULL,
+       .ack = NULL
+};
+
 static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .set = NULL,
        .ack = NULL
@@ -181,6 +188,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                mm ## block ## id ## _ ## reg_name
 
+#define SRI_DMUB(reg_name)\
+               BASE(mm ## reg_name ## _BASE_IDX) + \
+                       mm ## reg_name
 
 #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
                .enable_reg = SRI(reg1, block, reg_num),\
@@ -193,7 +203,26 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                .ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
                .ack_value = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 
+#define dmub_trace_int_entry()\
+       [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+               IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+                       DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+               .funcs = &dmub_trace_irq_info_funcs\
+       }
 
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+       .enable_reg = SRI_DMUB(reg1),\
+       .enable_mask = \
+               reg1 ## __ ## mask1 ## _MASK,\
+       .enable_value = {\
+               reg1 ## __ ## mask1 ## _MASK,\
+               ~reg1 ## __ ## mask1 ## _MASK \
+       },\
+       .ack_reg = SRI_DMUB(reg2),\
+       .ack_mask = \
+               reg2 ## __ ## mask2 ## _MASK,\
+       .ack_value = \
+               reg2 ## __ ## mask2 ## _MASK \
 
 #define hpd_int_entry(reg_num)\
                [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -348,6 +377,7 @@ static const struct irq_source_info irq_source_info_dcn302[DAL_IRQ_SOURCES_NUMBE
                vline0_int_entry(2),
                vline0_int_entry(3),
                vline0_int_entry(4),
+               dmub_trace_int_entry(),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn302 = {
index f07b348f7c29d50c2be1baef966a44a3af935436..44003836fafd6062e1ba04001951dcde24062bb3 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xc29b1734b
+#define DMUB_FW_VERSION_GIT_HASH 0x7f2db1846
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 56
+#define DMUB_FW_VERSION_REVISION 59
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -202,12 +202,7 @@ struct dmub_feature_caps {
         * Max PSR version supported by FW.
         */
        uint8_t psr;
-#ifndef TRIM_FAMS
-       uint8_t fw_assisted_mclk_switch;
-       uint8_t reserved[6];
-#else
        uint8_t reserved[7];
-#endif
 };
 
 #if defined(__cplusplus)
@@ -532,10 +527,6 @@ enum dmub_cmd_type {
         * Command type used for OUTBOX1 notification enable
         */
        DMUB_CMD__OUTBOX1_ENABLE = 71,
-#ifndef TRIM_FAMS
-       DMUB_CMD__FW_ASSISTED_MCLK_SWITCH = 76,
-#endif
-
        /**
         * Command type used for all VBIOS interface commands.
         */
@@ -1115,13 +1106,6 @@ enum dmub_cmd_psr_type {
        DMUB_CMD__PSR_FORCE_STATIC              = 5,
 };
 
-#ifndef TRIM_FAMS
-enum dmub_cmd_fams_type {
-       DMUB_CMD__FAMS_SETUP_FW_CTRL    = 0,
-       DMUB_CMD__FAMS_DRR_UPDATE               = 1,
-};
-#endif
-
 /**
  * PSR versions.
  */
@@ -1245,6 +1229,19 @@ struct dmub_cmd_psr_copy_settings_data {
         * Length of each horizontal line in us.
         */
        uint32_t line_time_in_us;
+       /**
+        * FEC enable status in driver
+        */
+       uint8_t fec_enable_status;
+       /**
+        * FEC re-enable delay after PSR exit.
+        * Unit is 100us; range is 0~255 (0xFF).
+        */
+       uint8_t fec_enable_delay_in100us;
+       /**
+        * Explicit padding to 4 byte boundary.
+        */
+       uint8_t pad3[2];
 };
 
 /**
@@ -1791,24 +1788,6 @@ struct dmub_rb_cmd_drr_update {
                struct dmub_optc_state dmub_optc_state_req;
 };
 
-#ifndef TRIM_FAMS
-struct dmub_cmd_fw_assisted_mclk_switch_pipe_data {
-       uint32_t pix_clk_100hz;
-       uint32_t min_refresh_in_uhz;
-       uint32_t max_ramp_step;
-};
-
-struct dmub_cmd_fw_assisted_mclk_switch_config {
-       uint32_t fams_enabled;
-       struct dmub_cmd_fw_assisted_mclk_switch_pipe_data pipe_data[DMUB_MAX_STREAMS];
-};
-
-struct dmub_rb_cmd_fw_assisted_mclk_switch {
-       struct dmub_cmd_header header;
-       struct dmub_cmd_fw_assisted_mclk_switch_config config_data;
-};
-#endif
-
 /**
  * Data passed from driver to FW in a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
  */
@@ -1951,9 +1930,6 @@ union dmub_rb_cmd {
         */
        struct dmub_rb_cmd_query_feature_caps query_feature_caps;
        struct dmub_rb_cmd_drr_update drr_update;
-#ifndef TRIM_FAMS
-       struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch;
-#endif
        /**
         * Definition of a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
         */
index 8ba0a9e2da541ec07088ff2b7ffc5883253a6210..1cbb125b4063b1c5e2508dbf78f51ae45a6b5b35 100644 (file)
@@ -415,6 +415,12 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
        if (!dmub->sw_init)
                return DMUB_STATUS_INVALID;
 
+       if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
+               !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+               ASSERT(0);
+               return DMUB_STATUS_INVALID;
+       }
+
        dmub->fb_base = params->fb_base;
        dmub->fb_offset = params->fb_offset;
        dmub->psp_version = params->psp_version;
@@ -422,101 +428,89 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
        if (dmub->hw_funcs.reset)
                dmub->hw_funcs.reset(dmub);
 
-       if (inst_fb && data_fb) {
-               cw0.offset.quad_part = inst_fb->gpu_addr;
-               cw0.region.base = DMUB_CW0_BASE;
-               cw0.region.top = cw0.region.base + inst_fb->size - 1;
-
-               cw1.offset.quad_part = stack_fb->gpu_addr;
-               cw1.region.base = DMUB_CW1_BASE;
-               cw1.region.top = cw1.region.base + stack_fb->size - 1;
-
-               if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
-                   /**
-                    * Read back all the instruction memory so we don't hang the
-                    * DMCUB when backdoor loading if the write from x86 hasn't been
-                    * flushed yet. This only occurs in backdoor loading.
-                    */
-                   dmub_flush_buffer_mem(inst_fb);
-                   dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
-               }
-
-       }
-
-       if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
-           fw_state_fb && scratch_mem_fb) {
-               cw2.offset.quad_part = data_fb->gpu_addr;
-               cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
-               cw2.region.top = cw2.region.base + data_fb->size;
-
-               cw3.offset.quad_part = bios_fb->gpu_addr;
-               cw3.region.base = DMUB_CW3_BASE;
-               cw3.region.top = cw3.region.base + bios_fb->size;
+       cw0.offset.quad_part = inst_fb->gpu_addr;
+       cw0.region.base = DMUB_CW0_BASE;
+       cw0.region.top = cw0.region.base + inst_fb->size - 1;
 
-               cw4.offset.quad_part = mail_fb->gpu_addr;
-               cw4.region.base = DMUB_CW4_BASE;
-               cw4.region.top = cw4.region.base + mail_fb->size;
+       cw1.offset.quad_part = stack_fb->gpu_addr;
+       cw1.region.base = DMUB_CW1_BASE;
+       cw1.region.top = cw1.region.base + stack_fb->size - 1;
 
+       if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
                /**
-                * Doubled the mailbox region to accomodate inbox and outbox.
-                * Note: Currently, currently total mailbox size is 16KB. It is split
-                * equally into 8KB between inbox and outbox. If this config is
-                * changed, then uncached base address configuration of outbox1
-                * has to be updated in funcs->setup_out_mailbox.
+                * Read back all the instruction memory so we don't hang the
+                * DMCUB when backdoor loading if the write from x86 hasn't been
+                * flushed yet. This only occurs in backdoor loading.
                 */
-               inbox1.base = cw4.region.base;
-               inbox1.top = cw4.region.base + DMUB_RB_SIZE;
-               outbox1.base = inbox1.top;
-               outbox1.top = cw4.region.top;
+               dmub_flush_buffer_mem(inst_fb);
+               dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
+       }
 
-               cw5.offset.quad_part = tracebuff_fb->gpu_addr;
-               cw5.region.base = DMUB_CW5_BASE;
-               cw5.region.top = cw5.region.base + tracebuff_fb->size;
+       cw2.offset.quad_part = data_fb->gpu_addr;
+       cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+       cw2.region.top = cw2.region.base + data_fb->size;
 
-               outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
-               outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
+       cw3.offset.quad_part = bios_fb->gpu_addr;
+       cw3.region.base = DMUB_CW3_BASE;
+       cw3.region.top = cw3.region.base + bios_fb->size;
 
+       cw4.offset.quad_part = mail_fb->gpu_addr;
+       cw4.region.base = DMUB_CW4_BASE;
+       cw4.region.top = cw4.region.base + mail_fb->size;
 
-               cw6.offset.quad_part = fw_state_fb->gpu_addr;
-               cw6.region.base = DMUB_CW6_BASE;
-               cw6.region.top = cw6.region.base + fw_state_fb->size;
+       /**
+        * Doubled the mailbox region to accommodate inbox and outbox.
+        * Note: Currently the total mailbox size is 16KB. It is split
+        * equally into 8KB between inbox and outbox. If this config is
+        * changed, then uncached base address configuration of outbox1
+        * has to be updated in funcs->setup_out_mailbox.
+        */
+       inbox1.base = cw4.region.base;
+       inbox1.top = cw4.region.base + DMUB_RB_SIZE;
+       outbox1.base = inbox1.top;
+       outbox1.top = cw4.region.top;
 
-               dmub->fw_state = fw_state_fb->cpu_addr;
+       cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+       cw5.region.base = DMUB_CW5_BASE;
+       cw5.region.top = cw5.region.base + tracebuff_fb->size;
 
-               dmub->scratch_mem_fb = *scratch_mem_fb;
+       outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
+       outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
 
-               if (dmub->hw_funcs.setup_windows)
-                       dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
-                                                    &cw5, &cw6);
+       cw6.offset.quad_part = fw_state_fb->gpu_addr;
+       cw6.region.base = DMUB_CW6_BASE;
+       cw6.region.top = cw6.region.base + fw_state_fb->size;
 
-               if (dmub->hw_funcs.setup_outbox0)
-                       dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
+       dmub->fw_state = fw_state_fb->cpu_addr;
 
-               if (dmub->hw_funcs.setup_mailbox)
-                       dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
-               if (dmub->hw_funcs.setup_out_mailbox)
-                       dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
-       }
+       dmub->scratch_mem_fb = *scratch_mem_fb;
 
-       if (mail_fb) {
-               dmub_memset(&rb_params, 0, sizeof(rb_params));
-               rb_params.ctx = dmub;
-               rb_params.base_address = mail_fb->cpu_addr;
-               rb_params.capacity = DMUB_RB_SIZE;
+       if (dmub->hw_funcs.setup_windows)
+               dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
 
-               dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+       if (dmub->hw_funcs.setup_outbox0)
+               dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
 
-               // Initialize outbox1 ring buffer
-               rb_params.ctx = dmub;
-               rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
-               rb_params.capacity = DMUB_RB_SIZE;
-               dmub_rb_init(&dmub->outbox1_rb, &rb_params);
+       if (dmub->hw_funcs.setup_mailbox)
+               dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+       if (dmub->hw_funcs.setup_out_mailbox)
+               dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
 
-       }
+       dmub_memset(&rb_params, 0, sizeof(rb_params));
+       rb_params.ctx = dmub;
+       rb_params.base_address = mail_fb->cpu_addr;
+       rb_params.capacity = DMUB_RB_SIZE;
+       dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+
+       // Initialize outbox1 ring buffer
+       rb_params.ctx = dmub;
+       rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
+       rb_params.capacity = DMUB_RB_SIZE;
+       dmub_rb_init(&dmub->outbox1_rb, &rb_params);
 
        dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
        outbox0_rb_params.ctx = dmub;
-       outbox0_rb_params.base_address = (void *)((uint64_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
+       outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
        outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
        dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);
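
Note the cast change on the trace-buffer base above: the pointer now
round-trips through uintptr_t instead of uint64_t. uintptr_t is the integer
type defined to hold an object pointer, so the conversion is portable rather
than relying on uint64_t happening to be wide enough. In isolation:

#include <stdint.h>

/* Portable pointer offset via uintptr_t (illustration) */
static void *offset_ptr(void *base, uintptr_t off)
{
	return (void *)((uintptr_t)base + off);
}
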
 
@@ -653,6 +647,8 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
        dmub->hw_funcs.set_gpint(dmub, reg);
 
        for (i = 0; i < timeout_us; ++i) {
+               udelay(1);
+
                if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
                        return DMUB_STATUS_OK;
        }
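
The added udelay(1) makes the loop bound meaningful: without a delay,
timeout_us iterations of a register poll complete far faster than timeout_us
microseconds, so the effective timeout depended on bus latency rather than
wall time. The general pattern, sketched outside the kernel with a
hypothetical poll callback standing in for is_gpint_acked():

#include <stdbool.h>
#include <unistd.h>   /* usleep(3); the kernel loop uses udelay() instead */

/* Poll until cond() holds or ~timeout_us microseconds elapse (sketch). */
static bool poll_with_timeout(bool (*cond)(void), unsigned int timeout_us)
{
	for (unsigned int i = 0; i < timeout_us; ++i) {
		usleep(1);        /* wait first, then sample, as in the fix */
		if (cond())
			return true;
	}
	return false;
}
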
index 21bbee17c52754603f123ccaabd68921bbfaa487..571fcf23cea92d761c07b77848387971c4268753 100644 (file)
@@ -36,6 +36,9 @@
 #define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
+#define DC_LOG_CURSOR(...) pr_debug("[CURSOR]:"__VA_ARGS__)
+#define DC_LOG_PFLIP(...) pr_debug("[PFLIP]:"__VA_ARGS__)
+#define DC_LOG_VBLANK(...) pr_debug("[VBLANK]:"__VA_ARGS__)
 #define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
 #define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__)
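
The three new categories route through pr_debug() with a bracketed prefix,
so they compile in but stay silent unless dynamic debug enables them.
Hypothetical call sites (x, y, crtc_id and otg_inst stand for local driver
state):

DC_LOG_CURSOR("cursor moved to (%d, %d)\n", x, y);
DC_LOG_PFLIP("pageflip submitted on crtc %d\n", crtc_id);
DC_LOG_VBLANK("vblank irq on otg %d\n", otg_inst);
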
index 5c67e12b2e5543880f108dc68cffed9f184b2508..ef742d95ef0579a8e0b0a23cd896d39e8993a69c 100644 (file)
@@ -942,7 +942,7 @@ static void hermite_spline_eetf(struct fixed31_32 input_x,
 static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
                uint32_t hw_points_num,
                const struct hw_x_point *coordinate_x,
-               const struct freesync_hdr_tf_params *fs_params,
+               const struct hdr_tm_params *fs_params,
                struct calculate_buffer *cal_buffer)
 {
        uint32_t i;
@@ -2027,7 +2027,7 @@ rgb_user_alloc_fail:
 static bool calculate_curve(enum dc_transfer_func_predefined trans,
                                struct dc_transfer_func_distributed_points *points,
                                struct pwl_float_data_ex *rgb_regamma,
-                               const struct freesync_hdr_tf_params *fs_params,
+                               const struct hdr_tm_params *fs_params,
                                uint32_t sdr_ref_white_level,
                                struct calculate_buffer *cal_buffer)
 {
@@ -2106,7 +2106,7 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
-               const struct freesync_hdr_tf_params *fs_params,
+               const struct hdr_tm_params *fs_params,
                struct calculate_buffer *cal_buffer)
 {
        struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
index 7563457e2ff49e5cdcc0d648ec995f995e69c480..2893abf482084492f8cbcd50b4ad0cb1238cf463 100644 (file)
@@ -76,7 +76,7 @@ struct regamma_lut {
        };
 };
 
-struct freesync_hdr_tf_params {
+struct hdr_tm_params {
        unsigned int sdr_white_level;
        unsigned int min_content; // luminance in 1/10000 nits
        unsigned int max_content; // luminance in nits
@@ -108,7 +108,7 @@ void precompute_de_pq(void);
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
-               const struct freesync_hdr_tf_params *fs_params,
+               const struct hdr_tm_params *fs_params,
                struct calculate_buffer *cal_buffer);
 
 bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
index e5f9d7704a63e4e4144a9db209ceddb0382013dd..3f4f44b44e6a9a9174bb4fbcdfed72043affa0c0 100644 (file)
@@ -118,7 +118,7 @@ static unsigned int calc_duration_in_us_from_v_total(
        return duration_in_us;
 }
 
-static unsigned int calc_v_total_from_refresh(
+unsigned int mod_freesync_calc_v_total_from_refresh(
                const struct dc_stream_state *stream,
                unsigned int refresh_in_uhz)
 {
@@ -280,10 +280,10 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 
                /* Restore FreeSync */
                in_out_vrr->adjust.v_total_min =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->max_refresh_in_uhz);
                in_out_vrr->adjust.v_total_max =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->min_refresh_in_uhz);
        /* BTR set to "active" so engage */
        } else {
@@ -442,16 +442,16 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
        if (update) {
                if (in_out_vrr->fixed.fixed_active) {
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(
+                               mod_freesync_calc_v_total_from_refresh(
                                stream, in_out_vrr->max_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
                                        in_out_vrr->adjust.v_total_min;
                } else {
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                        in_out_vrr->max_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                        in_out_vrr->min_refresh_in_uhz);
                }
        }
@@ -543,8 +543,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
                infopacket->sb[6] |= 0x02;
 
        /* PB6 = [Bit 2 = FreeSync Active] */
-       if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
-                       vrr->state == VRR_STATE_ACTIVE_FIXED)
+       if (vrr->state != VRR_STATE_DISABLED &&
+                       vrr->state != VRR_STATE_UNSUPPORTED)
                infopacket->sb[6] |= 0x04;
 
        // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
@@ -1082,10 +1082,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                        refresh_range >= MIN_REFRESH_RANGE) {
 
                in_out_vrr->adjust.v_total_min =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->max_refresh_in_uhz);
                in_out_vrr->adjust.v_total_max =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->min_refresh_in_uhz);
        } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
                in_out_vrr->fixed.target_refresh_in_uhz =
@@ -1099,7 +1099,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                } else {
                        in_out_vrr->fixed.fixed_active = true;
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                        in_out_vrr->fixed.target_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
                                in_out_vrr->adjust.v_total_min;
@@ -1206,10 +1206,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
                /* Restore FreeSync */
                if (in_out_vrr->btr.frame_counter == 0) {
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->max_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->min_refresh_in_uhz);
                }
        }
@@ -1267,6 +1267,21 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
        return nominal_field_rate_in_uhz;
 }
 
+unsigned long long mod_freesync_calc_field_rate_from_timing(
+               unsigned int vtotal, unsigned int htotal, unsigned int pix_clk)
+{
+       unsigned long long field_rate_in_uhz = 0;
+       unsigned int total = htotal * vtotal;
+
+       /* Calculate field rate in uHz from the timing; div_u64 truncates */
+       field_rate_in_uhz = pix_clk;
+       field_rate_in_uhz *= 1000000ULL;
+
+       field_rate_in_uhz = div_u64(field_rate_in_uhz, total);
+
+       return field_rate_in_uhz;
+}
+
 bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
                uint32_t max_refresh_cap_in_uhz,
                uint32_t nominal_field_rate_in_uhz) 
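
As a worked check of the new helper (assuming pix_clk is in Hz, which the
10^6 scaling to uHz implies): a 1080p CEA timing with htotal = 2200,
vtotal = 1125 and a 148.5 MHz pixel clock yields exactly 60 Hz.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pix_clk = 148500000ULL;        /* 148.5 MHz, 1080p60 CEA */
	uint64_t total   = 2200ULL * 1125ULL;   /* htotal * vtotal */
	uint64_t uhz     = pix_clk * 1000000ULL / total;

	assert(uhz == 60000000ULL);             /* 60,000,000 uHz = 60 Hz */
	return 0;
}
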
index 20e554e771d1634ba2c2cb14124447adbfe70405..68a6481d7f8f3094862012f80acdc6dee1f7ed7f 100644 (file)
@@ -53,7 +53,7 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
         */
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
                if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
-                               !hdcp->displays[i].adjust.disable) {
+                               hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
                        is_auth_needed = 1;
                        break;
                }
@@ -74,7 +74,7 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
         */
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
                if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
-                               !hdcp->displays[i].adjust.disable) {
+                               hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
                        is_auth_needed = 1;
                        break;
                }
@@ -314,6 +314,9 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
                goto out;
        }
 
+       /* save current encryption states to restore after next authentication */
+       mod_hdcp_save_current_encryption_states(hdcp);
+
        /* reset existing authentication status */
        status = reset_authentication(hdcp, output);
        if (status != MOD_HDCP_STATUS_SUCCESS)
@@ -360,6 +363,9 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
                goto out;
        }
 
+       /* save current encryption states to restore after next authentication */
+       mod_hdcp_save_current_encryption_states(hdcp);
+
        /* stop current authentication */
        status = reset_authentication(hdcp, output);
        if (status != MOD_HDCP_STATUS_SUCCESS)
index 5c22cf7e6118fb41960e762bf2e41bcdd83f327c..3ce91db560d1d2224025109719b6e8e221e105a5 100644 (file)
@@ -331,6 +331,8 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(
                struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
 enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
                struct mod_hdcp *hdcp, uint8_t index);
+bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp);
+void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -339,8 +341,6 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
        struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status);
 enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp);
index 73ca49f05bd3231d29a2b73e0be8c7d3e7fadcb2..eeac14300a2a2df4c6c2622bae914c02bab01fff 100644 (file)
@@ -256,10 +256,12 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
                goto out;
        }
 
-       if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+       mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
                        &input->link_maintenance, &status,
-                       hdcp, "link_maintenance"))
-               goto out;
+                       hdcp, "link_maintenance");
+
+       if (status != MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_save_current_encryption_states(hdcp);
 out:
        return status;
 }
@@ -425,19 +427,24 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
                event_ctx->unexpected_event = 1;
                goto out;
        }
-
-       if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
-                       &input->bstatus_read, &status,
-                       hdcp, "bstatus_read"))
-               goto out;
-       if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-                       &input->link_integrity_check, &status,
-                       hdcp, "link_integrity_check"))
-               goto out;
-       if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
-                       &input->reauth_request_check, &status,
-                       hdcp, "reauth_request_check"))
+       if (!mod_hdcp_is_link_encryption_enabled(hdcp))
                goto out;
+
+       if (status == MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+                               &input->bstatus_read, &status,
+                               hdcp, "bstatus_read");
+       if (status == MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_execute_and_set(check_link_integrity_dp,
+                               &input->link_integrity_check, &status,
+                               hdcp, "link_integrity_check");
+       if (status == MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+                               &input->reauth_request_check, &status,
+                               hdcp, "reauth_request_check");
+
+       if (status != MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_save_current_encryption_states(hdcp);
 out:
        return status;
 }
index 24ab95b093f762b95a3eefb6587cbbec874cc939..3dda8c1d83fc0154b7703d80c1fe008e9c381827 100644 (file)
@@ -93,7 +93,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
                }
                break;
        case H1_A45_AUTHENTICATED:
-               if (input->link_maintenance != PASS) {
+               if (input->link_maintenance == FAIL) {
                        /* 1A-07: consider invalid ri' a failure */
                        /* 1A-07a: consider read ri' not returned a failure */
                        fail_and_restart_in_ms(0, &status, output);
@@ -243,8 +243,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
                }
                break;
        case D1_A4_AUTHENTICATED:
-               if (input->link_integrity_check != PASS ||
-                               input->reauth_request_check != PASS) {
+               if (input->link_integrity_check == FAIL ||
+                               input->reauth_request_check == FAIL) {
                        /* 1A-07: restart hdcp on a link integrity failure */
                        fail_and_restart_in_ms(0, &status, output);
                        break;
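
The "!= PASS" to "== FAIL" flips in these transition tables are not
cosmetic: each per-step input is effectively tri-state (not executed, fail,
pass), and with link encryption disabled the authenticated-state checks
above are now skipped, leaving their inputs in the not-executed state.
Treating that state as failure would restart authentication spuriously.
A sketch of the distinction, with a hypothetical tri-state enum ordering:

#include <assert.h>
#include <stdbool.h>

enum check_result { UNKNOWN = 0, FAIL, PASS };   /* hypothetical values */

static bool must_restart_old(enum check_result r) { return r != PASS; }
static bool must_restart_new(enum check_result r) { return r == FAIL; }

int main(void)
{
	/* A skipped (not-executed) check no longer forces a restart. */
	assert(must_restart_old(UNKNOWN) && !must_restart_new(UNKNOWN));
	assert(must_restart_old(FAIL) && must_restart_new(FAIL));
	return 0;
}
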
index a0895a7efda2c8451124476627f91e4eeee273e9..f164f6a5d4dc8fc9098fd701b29588466c98522d 100644 (file)
@@ -564,11 +564,13 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
                event_ctx->unexpected_event = 1;
                goto out;
        }
-
-       if (!process_rxstatus(hdcp, event_ctx, input, &status))
-               goto out;
-       if (event_ctx->rx_id_list_ready)
+       if (!mod_hdcp_is_link_encryption_enabled(hdcp))
                goto out;
+
+       process_rxstatus(hdcp, event_ctx, input, &status);
+
+       if (status != MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_save_current_encryption_states(hdcp);
 out:
        return status;
 }
index e738c7ae66ec0faca4f79ad471969be73125f37e..b0306ed6d6b485189d8291344624a1f5f37a0740 100644 (file)
@@ -245,8 +245,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
                HDCP_FULL_DDC_TRACE(hdcp);
                break;
        case H2_A5_AUTHENTICATED:
-               if (input->rxstatus_read != PASS ||
-                               input->reauth_request_check != PASS) {
+               if (input->rxstatus_read == FAIL ||
+                               input->reauth_request_check == FAIL) {
                        fail_and_restart_in_ms(0, &status, output);
                        break;
                } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
@@ -562,11 +562,11 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
                HDCP_FULL_DDC_TRACE(hdcp);
                break;
        case D2_A5_AUTHENTICATED:
-               if (input->rxstatus_read != PASS ||
-                               input->reauth_request_check != PASS) {
+               if (input->rxstatus_read == FAIL ||
+                               input->reauth_request_check == FAIL) {
                        fail_and_restart_in_ms(0, &status, output);
                        break;
-               } else if (input->link_integrity_check_dp != PASS) {
+               } else if (input->link_integrity_check_dp == FAIL) {
                        if (hdcp->connection.hdcp2_retry_count >= 1)
                                adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
                        fail_and_restart_in_ms(0, &status, output);
index 904ce9b88088eae800e7e5e21681aedac7e12c0b..9d7ca316dc3f9c6b03818b30924a3071f96162a5 100644 (file)
@@ -914,3 +914,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        return status;
 }
 
+bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp)
+{
+       /* not supported yet; assume encryption is enabled */
+       return true;
+}
+
+void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp)
+{
+       /* unsupported */
+}
index b64cd5bdc7b5051fc7d75781ee5ef2bf24681a49..75a158a2514cdf3145d7671201b152ad513b8fb5 100644 (file)
@@ -171,10 +171,15 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
 unsigned long long mod_freesync_calc_nominal_field_rate(
                        const struct dc_stream_state *stream);
 
+unsigned long long mod_freesync_calc_field_rate_from_timing(
+               unsigned int vtotal, unsigned int htotal, unsigned int pix_clk);
+
 bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
                uint32_t max_refresh_cap_in_uhz,
                uint32_t nominal_field_rate_in_uhz);
 
-
+unsigned int mod_freesync_calc_v_total_from_refresh(
+               const struct dc_stream_state *stream,
+               unsigned int refresh_in_uhz);
 
 #endif
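
The two new helpers convert between a refresh rate and the timing that produces it. The field rate follows directly from the pixel clock divided by the total pixel count per frame; a hedged sketch (units are assumptions, mirroring the *_in_uhz conventions in this header):

    /* Sketch only: assumes pix_clk is in Hz and the result is in
     * micro-Hz; the real helper's scaling is not shown in this diff. */
    static unsigned long long field_rate_from_timing_uhz(unsigned int vtotal,
                                                         unsigned int htotal,
                                                         unsigned int pix_clk)
    {
            unsigned long long total = (unsigned long long)htotal * vtotal;

            return total ? ((unsigned long long)pix_clk * 1000000ULL) / total : 0;
    }
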
index d223ed3be5d3deca64218cc5752c75472c80d9aa..acbeada5215b0c5e5fffbb137e05b966c413f310 100644 (file)
@@ -120,6 +120,12 @@ enum mod_hdcp_display_state {
        MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
 };
 
+enum mod_hdcp_display_disable_option {
+       MOD_HDCP_DISPLAY_NOT_DISABLE = 0,
+       MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION,
+       MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
+};
+
 struct mod_hdcp_ddc {
        void *handle;
        struct {
@@ -149,8 +155,8 @@ struct mod_hdcp_psp {
 };
 
 struct mod_hdcp_display_adjustment {
-       uint8_t disable                 : 1;
-       uint8_t reserved                : 7;
+       uint8_t disable                 : 2;
+       uint8_t reserved                : 6;
 };
 
 struct mod_hdcp_link_adjustment_hdcp1 {
@@ -255,8 +261,6 @@ struct mod_hdcp_config {
        uint8_t index;
 };
 
-struct mod_hdcp;
-
 /* dm allocates memory of mod_hdcp per dc_link on dm init based on memory size*/
 size_t mod_hdcp_get_memory_size(void);
 
index 0102487a2c5f049f6cc4f2f18db97b94e9e9daa1..f21554a1c86c1470505c0d4be66a2eccb118bdc2 100644 (file)
 #define mmCP_CE_IB2_BASE_HI_BASE_IDX                                                                   1
 #define mmCP_CE_IB2_BUFSZ                                                                              0x20cb
 #define mmCP_CE_IB2_BUFSZ_BASE_IDX                                                                     1
+#define mmCP_IB1_BASE_LO                                                                               0x20cc
+#define mmCP_IB1_BASE_LO_BASE_IDX                                                                      1
+#define mmCP_IB1_BASE_HI                                                                               0x20cd
+#define mmCP_IB1_BASE_HI_BASE_IDX                                                                      1
+#define mmCP_IB1_BUFSZ                                                                                 0x20ce
+#define mmCP_IB1_BUFSZ_BASE_IDX                                                                        1
 #define mmCP_IB2_BASE_LO                                                                               0x20cf
 #define mmCP_IB2_BASE_LO_BASE_IDX                                                                      1
 #define mmCP_IB2_BASE_HI                                                                               0x20d0
index 4d2a1432c12104b828bb6a1b4013d4c3be2814d2..a827b0ff890519454013488920a7443661039f94 100644 (file)
 //CP_CE_IB2_BUFSZ
 #define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT                                                                     0x0
 #define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK                                                                       0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT                                                                    0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK                                                                      0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT                                                                    0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK                                                                      0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT                                                                        0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK                                                                          0x000FFFFFL
 //CP_IB2_BASE_LO
 #define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT                                                                    0x2
 #define CP_IB2_BASE_LO__IB2_BASE_LO_MASK                                                                      0xFFFFFFFCL
index c1d7b1d0b952080c0a7ec57156488b53caa03285..47eb84598b96b22842cd1806e4175b3fbd74e900 100644 (file)
@@ -1987,9 +1987,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
 #define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
 #define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
 #define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
-#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
 #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
-#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct definition for 30bpp should be 1 for 36bpp(5:4)
 #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
 #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
 #define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK            0x40
index 58364a8eb1f3e5e3902cc94842d5eceab48699a1..c77ed38c20fbd75c89c3c1d3ec515a24b746813d 100644 (file)
@@ -981,6 +981,40 @@ struct atom_display_controller_info_v4_2
   uint8_t  reserved3[8];
 };
 
+struct atom_display_controller_info_v4_3
+{
+  struct  atom_common_table_header  table_header;
+  uint32_t display_caps;
+  uint32_t bootup_dispclk_10khz;
+  uint16_t dce_refclk_10khz;
+  uint16_t i2c_engine_refclk_10khz;
+  uint16_t dvi_ss_percentage;       // in unit of 0.001%
+  uint16_t dvi_ss_rate_10hz;
+  uint16_t hdmi_ss_percentage;      // in unit of 0.001%
+  uint16_t hdmi_ss_rate_10hz;
+  uint16_t dp_ss_percentage;        // in unit of 0.001%
+  uint16_t dp_ss_rate_10hz;
+  uint8_t  dvi_ss_mode;             // enum of atom_spread_spectrum_mode
+  uint8_t  hdmi_ss_mode;            // enum of atom_spread_spectrum_mode
+  uint8_t  dp_ss_mode;              // enum of atom_spread_spectrum_mode
+  uint8_t  ss_reserved;
+  uint8_t  dfp_hardcode_mode_num;   // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+  uint8_t  dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+  uint8_t  vga_hardcode_mode_num;   // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+  uint8_t  vga_hardcode_refreshrate;// VGA hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+  uint16_t dpphy_refclk_10khz;
+  uint16_t reserved2;
+  uint8_t  dcnip_min_ver;
+  uint8_t  dcnip_max_ver;
+  uint8_t  max_disp_pipe_num;
+  uint8_t  max_vbios_active_disp_pipe_num;
+  uint8_t  max_ppll_num;
+  uint8_t  max_disp_phy_num;
+  uint8_t  max_aux_pairs;
+  uint8_t  remotedisplayconfig;
+  uint8_t  reserved3[8];
+};
+
 struct atom_display_controller_info_v4_4 {
        struct atom_common_table_header table_header;
        uint32_t display_caps;
@@ -1043,7 +1077,9 @@ enum dce_info_caps_def
   DCE_INFO_CAPS_DISABLE_DFP_DP_HBR2      =0x04,
   // only for VBIOS
   DCE_INFO_CAPS_ENABLE_INTERLAC_TIMING   =0x08,
-
+  // only for VBIOS
+  DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE    =0x20,
+  DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE = 0x40,
 };
 
 /* 
index e2bffcae273a5f255c10f5c2e412c005437258a6..754170a86ea4d0cee9e7c5cd44fd889886e0e07e 100644 (file)
 
 #define DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT       0x68
 #define DCN_1_0__CTXID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT       6
+#define DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT        0x68 // DMCUB_IHC_outbox1_ready_int IHC_DMCUB_outbox1_ready_int_ack DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT DISP_INTERRUPT_STATUS_CONTINUE24 Level/Pulse
+#define DCN_1_0__CTXID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT        8
 
 #endif // __IRQSRCS_DCN_1_0_H__
index dd695817c4816db75ded18253df2713cba6afec6..3534686670362964e61ad3037982b23e414f341c 100644 (file)
@@ -242,6 +242,9 @@ struct pp_display_clock_request;
 struct pp_clock_levels_with_voltage;
 struct pp_clock_levels_with_latency;
 struct amd_pp_clocks;
+struct pp_smu_wm_range_sets;
+struct pp_smu_nv_clock_table;
+struct dpm_clocks;
 
 struct amd_pm_funcs {
 /* export for dpm on ci and si */
@@ -336,6 +339,17 @@ struct amd_pm_funcs {
        int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
        int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
        ssize_t (*get_gpu_metrics)(void *handle, void **table);
+       int (*set_watermarks_for_clock_ranges)(void *handle,
+                                              struct pp_smu_wm_range_sets *ranges);
+       int (*display_disable_memory_clock_switch)(void *handle,
+                                                  bool disable_memory_clock_switch);
+       int (*get_max_sustainable_clocks_by_dc)(void *handle,
+                                               struct pp_smu_nv_clock_table *max_clocks);
+       int (*get_uclk_dpm_states)(void *handle,
+                                  unsigned int *clock_values_in_khz,
+                                  unsigned int *num_states);
+       int (*get_dpm_clock_table)(void *handle,
+                                  struct dpm_clocks *clock_table);
 };
 
 struct metrics_table_header {
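
The new hooks continue moving SMU entry points behind the amd_pm_funcs vtable instead of exporting them directly. Callers are expected to NULL-check both the table and the specific hook, as the powergating path in the following file does; a hedged sketch for the new get_dpm_clock_table hook (example_get_dpm_clocks is illustrative):

    /* Sketch, following the pp_funcs NULL-check convention used in
     * amdgpu_dpm_set_powergating_by_smu() below. */
    static int example_get_dpm_clocks(struct amdgpu_device *adev,
                                      struct dpm_clocks *table)
    {
            const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

            if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
                    return -EOPNOTSUPP;

            return pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
                                                 table);
    }
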
index 0a6bb3311f0fc88cf8d487b7584d82ae5a48753d..03581d5b183607862828e07ba92ae1edce66de99 100644 (file)
@@ -927,7 +927,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 {
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-       bool swsmu = is_support_sw_smu(adev);
 
        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
@@ -968,15 +967,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
        case AMD_IP_BLOCK_TYPE_GFX:
        case AMD_IP_BLOCK_TYPE_VCN:
        case AMD_IP_BLOCK_TYPE_SDMA:
-               if (pp_funcs && pp_funcs->set_powergating_by_smu) {
-                       ret = (pp_funcs->set_powergating_by_smu(
-                               (adev)->powerplay.pp_handle, block_type, gate));
-               }
-               break;
        case AMD_IP_BLOCK_TYPE_JPEG:
-               if (swsmu)
-                       ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-               break;
        case AMD_IP_BLOCK_TYPE_GMC:
        case AMD_IP_BLOCK_TYPE_ACP:
                if (pp_funcs && pp_funcs->set_powergating_by_smu) {
@@ -1606,7 +1597,10 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
                        pr_err("smu firmware loading failed\n");
                        return r;
                }
-               *smu_version = adev->pm.fw_version;
+
+               if (smu_version)
+                       *smu_version = adev->pm.fw_version;
        }
+
        return 0;
 }
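
With the added check, callers that only need the firmware loaded can pass NULL for smu_version instead of a throwaway variable; a hypothetical call site:

    /* Load SMU firmware; the version is not needed here. */
    r = amdgpu_pm_load_smu_firmware(adev, NULL);
    if (r)
            return r;
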
index 2627870a786ed0e4cbd19f0c264d7da48b321679..204e3454901346bc1871ba90d8943484605f9575 100644 (file)
@@ -27,7 +27,6 @@
 #include "amdgpu_drv.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
-#include "amdgpu_smu.h"
 #include "atom.h"
 #include <linux/pci.h>
 #include <linux/hwmon.h>
@@ -129,6 +128,8 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -145,9 +146,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
-                       (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+       return sysfs_emit(buf, "%s\n",
+                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
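
The snprintf()-to-sysfs_emit() conversions throughout this file follow the recommended pattern for sysfs show() callbacks: sysfs_emit() knows the buffer is a full PAGE_SIZE page, warns if handed a non-page-aligned pointer, and so removes the redundant size argument. The call shape is otherwise identical:

    /* Before: the caller must pass PAGE_SIZE explicitly. */
    return snprintf(buf, PAGE_SIZE, "%d\n", value);

    /* After: sysfs_emit() checks alignment and bounds internally. */
    return sysfs_emit(buf, "%d\n", value);
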
 
 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
@@ -162,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
@@ -268,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -283,17 +288,17 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-                       (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
-                       (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
-                       (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
-                       (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
-                       "unknown");
+       return sysfs_emit(buf, "%s\n",
+                         (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+                         (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+                         (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+                         (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+                         (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
+                         "unknown");
 }
 
 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
@@ -310,6 +315,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -408,6 +415,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -448,6 +457,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -472,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
        if (i == data.nums)
                i = -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", i);
+       return sysfs_emit(buf, "%d\n", i);
 }
 
 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
@@ -484,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
-               return snprintf(buf, PAGE_SIZE, "\n");
+               return sysfs_emit(buf, "\n");
 }
 
 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
@@ -504,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
@@ -564,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -602,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -764,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (count > 127)
                return -EINVAL;
@@ -865,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -916,6 +939,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
@@ -927,14 +952,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+       if (adev->powerplay.pp_funcs->set_ppfeature_status) {
                ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
@@ -959,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1018,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1083,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
@@ -1239,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1254,7 +1280,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
@@ -1269,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtol(buf, 0, &value);
 
@@ -1312,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1327,7 +1357,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
@@ -1342,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtol(buf, 0, &value);
 
@@ -1405,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1443,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        tmp[0] = *(buf);
        tmp[1] = '\0';
@@ -1506,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0) {
@@ -1523,7 +1561,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1544,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0) {
@@ -1561,7 +1601,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1587,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->flags & AMD_IS_APU)
                return -ENODATA;
@@ -1605,8 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
-                       count0, count1, pcie_get_mps(adev->pdev));
+       return sysfs_emit(buf, "%llu %llu %i\n",
+                         count0, count1, pcie_get_mps(adev->pdev));
 }
 
 /**
@@ -1628,9 +1670,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->unique_id)
-               return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+               return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 
        return 0;
 }
@@ -1657,10 +1701,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-                       adev_to_drm(adev)->unique,
-                       atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
-                       adev->throttling_logging_rs.interval / HZ + 1);
+       return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
+                         adev_to_drm(adev)->unique,
+                         atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+                         adev->throttling_logging_rs.interval / HZ + 1);
 }
 
 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
@@ -1726,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1954,6 +2000,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
@@ -1991,7 +2039,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
@@ -2007,7 +2055,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
@@ -2023,7 +2071,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
@@ -2039,7 +2087,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_mem_crit_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
@@ -2051,7 +2099,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+       return sysfs_emit(buf, "%s\n", temp_label[channel].label);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
@@ -2077,7 +2125,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
                break;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
@@ -2090,6 +2138,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
@@ -2122,6 +2172,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -2172,6 +2224,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2220,6 +2274,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2253,6 +2309,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2285,6 +2343,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2301,7 +2361,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+       return sysfs_emit(buf, "%d\n", min_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
@@ -2315,6 +2375,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2331,7 +2393,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+       return sysfs_emit(buf, "%d\n", max_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
@@ -2344,6 +2406,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2376,6 +2440,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2422,6 +2488,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
@@ -2455,6 +2523,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -2496,6 +2566,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2513,14 +2585,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
+       return sysfs_emit(buf, "%d\n", vddgfx);
 }
 
 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "vddgfx\n");
+       return sysfs_emit(buf, "vddgfx\n");
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
@@ -2533,6 +2605,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        /* only APUs have vddnb */
        if  (!(adev->flags & AMD_IS_APU))
@@ -2554,14 +2628,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
+       return sysfs_emit(buf, "%d\n", vddnb);
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "vddnb\n");
+       return sysfs_emit(buf, "vddnb\n");
 }
 
 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
@@ -2575,6 +2649,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2595,7 +2671,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
        /* convert to microwatts */
        uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", uw);
+       return sysfs_emit(buf, "%u\n", uw);
 }
 
 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -2619,6 +2695,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2656,6 +2734,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2693,6 +2773,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2722,7 +2804,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
 {
        int limit_type = to_sensor_dev_attr(attr)->index;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
+       return sysfs_emit(buf, "%s\n",
                limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
 }
 
@@ -2739,6 +2821,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (amdgpu_sriov_vf(adev))
                return -EINVAL;
@@ -2780,6 +2864,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2797,14 +2883,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
+       return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "sclk\n");
+       return sysfs_emit(buf, "sclk\n");
 }
 
 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
@@ -2817,6 +2903,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2834,14 +2922,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
+       return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "mclk\n");
+       return sysfs_emit(buf, "mclk\n");
 }
 
 /**
@@ -3390,6 +3478,8 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
index 433dd1e9ec4f819a7da73fac7f4d7c609ab7ff0d..610266088ff1c9668fa94a15bb955a3c2b7fbb11 100644 (file)
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x40
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow  0x41
 
-#define PPSMC_Message_Count                      0x42
+#define PPSMC_MSG_GfxDriverResetRecovery       0x42
+#define PPSMC_Message_Count                    0x43
 
 //PPSMC Reset Types
 #define PPSMC_RESET_TYPE_WARM_RESET              0x00
index 25d5f03aaa679a4b94d049bfe84691d1ca3e22ae..8bb224f6c762d38bcc1fa80b86ea16e0370158af 100644 (file)
@@ -195,6 +195,11 @@ struct smu_user_dpm_profile {
        uint32_t clk_dependency;
 };
 
+enum smu_event_type {
+       SMU_EVENT_RESET_COMPLETE = 0,
+};
+
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)      \
        do {                                            \
                tables[table_id].size = s;              \
@@ -338,7 +343,6 @@ struct smu_power_context {
        struct smu_power_gate power_gate;
 };
 
-
 #define SMU_FEATURE_MAX        (64)
 struct smu_feature
 {
@@ -806,6 +810,13 @@ struct pptable_funcs {
         */
        int (*check_fw_status)(struct smu_context *smu);
 
+       /**
+        * @set_mp1_state: put SMU into a correct state for the coming
+        *                 resume from runpm or gpu reset.
+        */
+       int (*set_mp1_state)(struct smu_context *smu,
+                            enum pp_mp1_state mp1_state);
+
        /**
         * @setup_pptable: Initialize the power play table and populate it with
         *                 default values.
@@ -1160,6 +1171,12 @@ struct pptable_funcs {
         * @set_light_sbr:  Set light sbr mode for the SMU.
         */
        int (*set_light_sbr)(struct smu_context *smu, bool enable);
+
+       /**
+        * @wait_for_event:  Wait for events from SMU.
+        */
+       int (*wait_for_event)(struct smu_context *smu,
+                             enum smu_event_type event, uint64_t event_arg);
 };
 
 typedef enum {
@@ -1235,64 +1252,13 @@ enum smu_cmn2asic_mapping_type {
        [profile] = {1, (workload)}
 
 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
-int smu_load_microcode(struct smu_context *smu);
-
-int smu_check_fw_status(struct smu_context *smu);
-
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
-
 int smu_get_power_limit(struct smu_context *smu,
                        uint32_t *limit,
                        enum smu_ppt_limit_level limit_level);
 
-int smu_set_power_limit(void *handle, uint32_t limit);
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf);
-
-int smu_od_edit_dpm_table(void *handle,
-                         enum PP_OD_DPM_TABLE_COMMAND type,
-                         long *input, uint32_t size);
-
-int smu_read_sensor(void *handle, int sensor, void *data, int *size);
-int smu_get_power_profile_mode(void *handle, char *buf);
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size);
-u32 smu_get_fan_control_mode(void *handle);
-int smu_set_fan_control_mode(struct smu_context *smu, int value);
-void smu_pp_set_fan_control_mode(void *handle, u32 value);
-int smu_get_fan_speed_percent(void *handle, u32 *speed);
-int smu_set_fan_speed_percent(void *handle, u32 speed);
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed);
-
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
-
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-                                      enum smu_clk_type clk_type,
-                                      struct pp_clock_levels_with_latency *clocks);
-
-int smu_display_clock_voltage_request(struct smu_context *smu,
-                                     struct pp_display_clock_request *clock_req);
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
-
-int smu_set_xgmi_pstate(void *handle,
-                       uint32_t pstate);
-
-int smu_set_azalia_d3_pme(struct smu_context *smu);
-
-bool smu_baco_is_support(struct smu_context *smu);
-int smu_get_baco_capability(void *handle, bool *cap);
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
-
-int smu_baco_enter(struct smu_context *smu);
-int smu_baco_exit(struct smu_context *smu);
-int smu_baco_set_state(void *handle, int state);
-
-
 bool smu_mode1_reset_is_support(struct smu_context *smu);
 bool smu_mode2_reset_is_support(struct smu_context *smu);
 int smu_mode1_reset(struct smu_context *smu);
-int smu_mode2_reset(void *handle);
 
 extern const struct amd_ip_funcs smu_ip_funcs;
 
@@ -1302,68 +1268,24 @@ extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
 
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_cclk_dpm(struct amdgpu_device *adev);
-int smu_reset(struct smu_context *smu);
-int smu_sys_get_pp_table(void *handle, char **table);
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size);
-int smu_get_power_num_states(void *handle, struct pp_states_info *state_info);
-enum amd_pm_state_type smu_get_current_power_state(void *handle);
 int smu_write_watermarks_table(struct smu_context *smu);
-int smu_set_watermarks_for_clock_ranges(
-               struct smu_context *smu,
-               struct pp_smu_wm_range_sets *clock_ranges);
-
-/* smu to display interface */
-extern int smu_display_configuration_change(struct smu_context *smu, const
-                                           struct amd_pp_display_configuration
-                                           *display_config);
-extern int smu_dpm_set_power_gate(void *handle, uint32_t block_type, bool gate);
-extern int smu_handle_task(struct smu_context *smu,
-                          enum amd_dpm_forced_level level,
-                          enum amd_pp_task task_id,
-                          bool lock_needed);
-extern int smu_handle_dpm_task(void *handle,
-                              enum amd_pp_task task_id,
-                              enum amd_pm_state_type *user_state);
-int smu_switch_power_profile(void *handle,
-                            enum PP_SMC_POWER_PROFILE type,
-                            bool en);
+
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max);
-u32 smu_get_mclk(void *handle, bool low);
-u32 smu_get_sclk(void *handle, bool low);
+
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max);
-enum amd_dpm_forced_level smu_get_performance_level(void *handle);
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level);
-int smu_set_display_count(struct smu_context *smu, uint32_t count);
-int smu_set_ac_dc(struct smu_context *smu);
-int smu_sys_get_pp_feature_mask(void *handle, char *buf);
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask);
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask);
-int smu_set_mp1_state(void *handle,
-                     enum pp_mp1_state mp1_state);
-int smu_set_df_cstate(void *handle,
-                     enum pp_df_cstate state);
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-                                        struct pp_smu_nv_clock_table *max_clocks);
-
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-                           unsigned int *clock_values_in_khz,
-                           unsigned int *num_states);
+int smu_set_ac_dc(struct smu_context *smu);
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-                           struct dpm_clocks *clock_table);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
 
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table);
-
-int smu_enable_mgpu_fan_boost(void *handle);
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state);
-
 int smu_set_light_sbr(struct smu_context *smu, bool enable);
 
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+                      uint64_t event_arg);
+
 #endif
 #endif
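
The wait_for_event plumbing gives reset code a way to block until the SMU signals completion of an operation. Based on the declarations above, a caller in a reset path would look roughly like this (a sketch; the actual call sites are elsewhere in this series):

    /* Wait for the SMU to report that driver reset recovery finished;
     * the trailing 0 is the event_arg, unused for this event. */
    ret = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
    if (ret)
            dev_err(adev->dev, "SMU reset recovery wait failed\n");
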
index df2ead254f37e4ecad14d67b4ee0921ca13f6fbd..d23533bda0026dd2ec77261930554dfdbb6fe35e 100644 (file)
@@ -435,8 +435,12 @@ typedef struct {
   uint8_t  GpioI2cSda; // Serial Data
   uint16_t spare5;
 
+  uint16_t XgmiMaxCurrent; // in Amps
+  int8_t   XgmiOffset;     // in Amps
+  uint8_t  Padding_TelemetryXgmi;
+
   //reserved
-  uint32_t reserved[16];
+  uint32_t reserved[15];
 
 } PPTable_t;
 
@@ -481,7 +485,10 @@ typedef struct {
   uint16_t TemperatureAllHBM[4]  ;
   uint32_t GfxBusyAcc            ;
   uint32_t DramBusyAcc           ;
-  uint32_t Spare[4];
+  uint32_t EnergyAcc64bitLow     ; //15.259uJ resolution
+  uint32_t EnergyAcc64bitHigh    ;
+  uint32_t TimeStampLow          ; //10ns resolution
+  uint32_t TimeStampHigh         ;
 
   // Padding - ignore
   uint32_t     MmHubPadding[8]; // SMU internal use
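
Both hunks consume exactly the reserved space they replace, keeping the driver/SMU table sizes unchanged: XgmiMaxCurrent + XgmiOffset + padding is 2 + 1 + 1 bytes, i.e. one uint32_t, so reserved shrinks from 16 to 15 words, and the four new metrics words replace Spare[4] one-for-one. Reading the split 64-bit accumulators back out (a sketch; m is a hypothetical pointer to the metrics struct):

    /* 15.259 uJ per LSB is 1 J / 65536; 10 ns per timestamp tick. */
    uint64_t raw_energy = ((uint64_t)m->EnergyAcc64bitHigh << 32) |
                          m->EnergyAcc64bitLow;
    uint64_t energy_uj  = raw_energy * 15259ULL / 1000ULL;
    uint64_t stamp_ns   = (((uint64_t)m->TimeStampHigh << 32) |
                           m->TimeStampLow) * 10ULL;
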
index 5bfb60f41dd422b92d685aea3db2c0a2df1f0c73..89a16dcd0fff90abd2c7bffbd25ab357a1797c23 100644 (file)
        __SMU_DUMMY_MAP(DisableDeterminism),            \
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
+       __SMU_DUMMY_MAP(GfxDriverResetRecovery),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index ad4db2edf1fb2d140a650d3bd6a12b230830fa80..d5182bbaa59834fc4bf42461a395a84dc0378156 100644 (file)
@@ -61,8 +61,8 @@
 #define LINK_WIDTH_MAX                 6
 #define LINK_SPEED_MAX                 3
 
-static __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
+static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
 
 static const
 struct smu_temperature_range __maybe_unused smu11_thermal_policy[] =
index 80208e1eefc95b82c9852e730c1d322c37bd8f6d..8145e1cbf181eeb87318687920cb6ecd04f6081c 100644 (file)
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x5
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x6
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
@@ -268,5 +268,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu);
 int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
                              bool enablement);
 
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+                            uint64_t event_arg);
+
 #endif
 #endif
index f5d59fa3a0308693063ed141aa7f5c74ddc7dc7a..f5fe540cd5366b4cbf619b33207e08470f6239ba 100644 (file)
@@ -1297,19 +1297,18 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               if (has_gfx_busy) {
+               if (!has_gfx_busy)
+                       ret = -EOPNOTSUPP;
+               else {
                        ret = smum_send_msg_to_smc(hwmgr,
                                                   PPSMC_MSG_GetGfxBusy,
                                                   &activity_percent);
                        if (!ret)
-                               activity_percent = activity_percent > 100 ? 100 : activity_percent;
+                               *((uint32_t *)value) = min(activity_percent, (u32)100);
                        else
-                               return -EIO;
-                       *((uint32_t *)value) = activity_percent;
-                       return 0;
-               } else {
-                       return -EOPNOTSUPP;
+                               ret = -EIO;
                }
+               break;
        default:
                ret = -EOPNOTSUPP;
                break;
index 7edafef095a3a3c0874a9bbe43ccbc96a62bd3bd..0541bfc81c1b4496ff0458fb11a73bfc56bd73a3 100644 (file)
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                    (hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12) ||
-                   (hwmgr->chip_id == CHIP_TONGA))
+                   (hwmgr->chip_id == CHIP_TONGA) ||
+                   (hwmgr->chip_id == CHIP_TOPAZ))
                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 
@@ -3330,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
                                                !hwmgr->display_config->multi_monitor_in_sync) ||
-                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+                                               (hwmgr->display_config->num_display &&
+                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
        disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
                                         disable_mclk_switching_for_display;
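
The added num_display term stops smu7_vblank_too_short() from being consulted when no display is active, where min_vblank_time is meaningless; in the headless case the display term now evaluates to false and memory-clock switching stays enabled. Condensed (names shortened for illustration):

    disable_for_display =
            (num_display > 1 && !multi_monitor_in_sync) ||
            (num_display && vblank_too_short(hwmgr, min_vblank_time));
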
index b6d7b7b224a9057016b6659ea4755b2bec024dd4..1a097e608808e21561c7ea2f3850ce8be701e18c 100644 (file)
@@ -52,8 +52,8 @@
 
 #define LINK_WIDTH_MAX                         6
 #define LINK_SPEED_MAX                         3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);
index 213c9c6b446238566d99e3fa0b341e45510201d8..d3177a534fdf07d335397160cc92a68abc08ddc4 100644 (file)
@@ -57,8 +57,8 @@
 
 #define LINK_WIDTH_MAX                         6
 #define LINK_SPEED_MAX                         3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {
index e722adcf2f534321295bacf37e6d6075a2981eb6..e0eb7ca112e271db9f4c4ed7163ec555cf8e8886 100644 (file)
@@ -51,8 +51,19 @@ static const struct amd_pm_funcs swsmu_pm_funcs;
 static int smu_force_smuclk_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type,
                                   uint32_t mask);
-
-int smu_sys_get_pp_feature_mask(void *handle, char *buf)
+static int smu_handle_task(struct smu_context *smu,
+                          enum amd_dpm_forced_level level,
+                          enum amd_pp_task task_id,
+                          bool lock_needed);
+static int smu_reset(struct smu_context *smu);
+static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+static int smu_sys_get_pp_feature_mask(void *handle,
+                                      char *buf)
 {
        struct smu_context *smu = handle;
        int size = 0;
@@ -69,7 +80,8 @@ int smu_sys_get_pp_feature_mask(void *handle, char *buf)
        return size;
 }
 
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
+static int smu_sys_set_pp_feature_mask(void *handle,
+                                      uint64_t new_mask)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -142,7 +154,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
        return ret;
 }
 
-u32 smu_get_mclk(void *handle, bool low)
+static u32 smu_get_mclk(void *handle, bool low)
 {
        struct smu_context *smu = handle;
        uint32_t clk_freq;
@@ -156,7 +168,7 @@ u32 smu_get_mclk(void *handle, bool low)
        return clk_freq * 100;
 }
 
-u32 smu_get_sclk(void *handle, bool low)
+static u32 smu_get_sclk(void *handle, bool low)
 {
        struct smu_context *smu = handle;
        uint32_t clk_freq;
@@ -256,8 +268,9 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
  *    Under this case, the smu->mutex lock protection is already enforced on
  *    the parent API smu_force_performance_level of the call path.
  */
-int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
-                          bool gate)
+static int smu_dpm_set_power_gate(void *handle,
+                                 uint32_t block_type,
+                                 bool gate)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -406,8 +419,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
        smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
 }
 
-int smu_get_power_num_states(void *handle,
-                            struct pp_states_info *state_info)
+static int smu_get_power_num_states(void *handle,
+                                   struct pp_states_info *state_info)
 {
        if (!state_info)
                return -EINVAL;
@@ -442,7 +455,8 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
 }
 
 
-int smu_sys_get_pp_table(void *handle, char **table)
+static int smu_sys_get_pp_table(void *handle,
+                               char **table)
 {
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
@@ -468,7 +482,9 @@ int smu_sys_get_pp_table(void *handle, char **table)
        return powerplay_table_size;
 }
 
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
+static int smu_sys_set_pp_table(void *handle,
+                               const char *buf,
+                               size_t size)
 {
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
@@ -632,6 +648,7 @@ err0_out:
        return ret;
 }
 
+
 static int smu_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1337,7 +1354,7 @@ static int smu_disable_dpms(struct smu_context *smu)
        bool use_baco = !smu->is_apu &&
                ((amdgpu_in_reset(adev) &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-                ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+                ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
        /*
         * For custom pptable uploading, skip the DPM features
@@ -1430,7 +1447,7 @@ static int smu_hw_fini(void *handle)
        return smu_smc_hw_cleanup(smu);
 }
 
-int smu_reset(struct smu_context *smu)
+static int smu_reset(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret;
@@ -1474,7 +1491,8 @@ static int smu_suspend(void *handle)
 
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-       if (smu->is_apu)
+       /* skip CGPG when in S0ix */
+       if (smu->is_apu && !adev->in_s0ix)
                smu_set_gfx_cgpg(&adev->smu, false);
 
        return 0;
@@ -1518,9 +1536,10 @@ static int smu_resume(void *handle)
        return 0;
 }
 
-int smu_display_configuration_change(struct smu_context *smu,
-                                    const struct amd_pp_display_configuration *display_config)
+static int smu_display_configuration_change(void *handle,
+                                           const struct amd_pp_display_configuration *display_config)
 {
+       struct smu_context *smu = handle;
        int index = 0;
        int num_of_active_display = 0;
 
@@ -1676,10 +1695,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
        return ret;
 }
 
-int smu_handle_task(struct smu_context *smu,
-                   enum amd_dpm_forced_level level,
-                   enum amd_pp_task task_id,
-                   bool lock_needed)
+static int smu_handle_task(struct smu_context *smu,
+                          enum amd_dpm_forced_level level,
+                          enum amd_pp_task task_id,
+                          bool lock_needed)
 {
        int ret = 0;
 
@@ -1711,9 +1730,9 @@ out:
        return ret;
 }
 
-int smu_handle_dpm_task(void *handle,
-                       enum amd_pp_task task_id,
-                       enum amd_pm_state_type *user_state)
+static int smu_handle_dpm_task(void *handle,
+                              enum amd_pp_task task_id,
+                              enum amd_pm_state_type *user_state)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -1722,10 +1741,9 @@ int smu_handle_dpm_task(void *handle,
 
 }
 
-
-int smu_switch_power_profile(void *handle,
-                            enum PP_SMC_POWER_PROFILE type,
-                            bool en)
+static int smu_switch_power_profile(void *handle,
+                                   enum PP_SMC_POWER_PROFILE type,
+                                   bool en)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1761,7 +1779,7 @@ int smu_switch_power_profile(void *handle,
        return 0;
 }
 
-enum amd_dpm_forced_level smu_get_performance_level(void *handle)
+static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1780,7 +1798,8 @@ enum amd_dpm_forced_level smu_get_performance_level(void *handle)
        return level;
 }
 
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
+static int smu_force_performance_level(void *handle,
+                                      enum amd_dpm_forced_level level)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1815,8 +1834,9 @@ int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
        return ret;
 }
 
-int smu_set_display_count(struct smu_context *smu, uint32_t count)
+static int smu_set_display_count(void *handle, uint32_t count)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1859,7 +1879,9 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
        return ret;
 }
 
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
+static int smu_force_ppclk_levels(void *handle,
+                                 enum pp_clock_type type,
+                                 uint32_t mask)
 {
        struct smu_context *smu = handle;
        enum smu_clk_type clk_type;
@@ -1903,48 +1925,28 @@ int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
  * However, the mp1 state setting should still be granted
  * even if the dpm_enabled cleared.
  */
-int smu_set_mp1_state(void *handle,
-                     enum pp_mp1_state mp1_state)
+static int smu_set_mp1_state(void *handle,
+                            enum pp_mp1_state mp1_state)
 {
        struct smu_context *smu = handle;
-       uint16_t msg;
-       int ret;
+       int ret = 0;
 
        if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       switch (mp1_state) {
-       case PP_MP1_STATE_SHUTDOWN:
-               msg = SMU_MSG_PrepareMp1ForShutdown;
-               break;
-       case PP_MP1_STATE_UNLOAD:
-               msg = SMU_MSG_PrepareMp1ForUnload;
-               break;
-       case PP_MP1_STATE_RESET:
-               msg = SMU_MSG_PrepareMp1ForReset;
-               break;
-       case PP_MP1_STATE_NONE:
-       default:
-               mutex_unlock(&smu->mutex);
-               return 0;
-       }
-
-       ret = smu_send_smc_msg(smu, msg, NULL);
-       /* some asics may not support those messages */
-       if (ret == -EINVAL)
-               ret = 0;
-       if (ret)
-               dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+       if (smu->ppt_funcs &&
+           smu->ppt_funcs->set_mp1_state)
+               ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
 
        mutex_unlock(&smu->mutex);
 
        return ret;
 }
 
-int smu_set_df_cstate(void *handle,
-                     enum pp_df_cstate state)
+static int smu_set_df_cstate(void *handle,
+                            enum pp_df_cstate state)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2003,9 +2005,10 @@ int smu_write_watermarks_table(struct smu_context *smu)
        return ret;
 }
 
-int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
-               struct pp_smu_wm_range_sets *clock_ranges)
+static int smu_set_watermarks_for_clock_ranges(void *handle,
+                                              struct pp_smu_wm_range_sets *clock_ranges)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2092,41 +2095,39 @@ const struct amdgpu_ip_block_version smu_v13_0_ip_block =
        .funcs = &smu_ip_funcs,
 };
 
-int smu_load_microcode(struct smu_context *smu)
+static int smu_load_microcode(void *handle)
 {
+       struct smu_context *smu = handle;
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+       if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       /* This path is only for non-PSP firmware loading */
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+               return 0;
 
-       if (smu->ppt_funcs->load_microcode)
+       if (smu->ppt_funcs->load_microcode) {
                ret = smu->ppt_funcs->load_microcode(smu);
+               if (ret) {
+                       dev_err(adev->dev, "Load microcode failed\n");
+                       return ret;
+               }
+       }
 
-       mutex_unlock(&smu->mutex);
-
-       return ret;
-}
-
-int smu_check_fw_status(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->check_fw_status)
+       if (smu->ppt_funcs->check_fw_status) {
                ret = smu->ppt_funcs->check_fw_status(smu);
-
-       mutex_unlock(&smu->mutex);
+               if (ret) {
+                       dev_err(adev->dev, "SMC is not ready\n");
+                       return ret;
+               }
+       }
 
        return ret;
 }
 
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 {
        int ret = 0;
 
@@ -2140,7 +2141,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
        return ret;
 }
 
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 {
        struct smu_context *smu = handle;
        u32 percent;
@@ -2199,7 +2200,7 @@ int smu_get_power_limit(struct smu_context *smu,
        return ret;
 }
 
-int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit)
 {
        struct smu_context *smu = handle;
        uint32_t limit_type = limit >> 24;
@@ -2255,7 +2256,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
        return ret;
 }
 
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
+static int smu_print_ppclk_levels(void *handle,
+                                 enum pp_clock_type type,
+                                 char *buf)
 {
        struct smu_context *smu = handle;
        enum smu_clk_type clk_type;
@@ -2296,9 +2299,9 @@ int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
        return smu_print_smuclk_levels(smu, clk_type, buf);
 }
 
-int smu_od_edit_dpm_table(void *handle,
-                         enum PP_OD_DPM_TABLE_COMMAND type,
-                         long *input, uint32_t size)
+static int smu_od_edit_dpm_table(void *handle,
+                                enum PP_OD_DPM_TABLE_COMMAND type,
+                                long *input, uint32_t size)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2317,7 +2320,10 @@ int smu_od_edit_dpm_table(void *handle,
        return ret;
 }
 
-int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
+static int smu_read_sensor(void *handle,
+                          int sensor,
+                          void *data,
+                          int *size_arg)
 {
        struct smu_context *smu = handle;
        struct smu_umd_pstate_table *pstate_table =
@@ -2384,7 +2390,7 @@ unlock:
        return ret;
 }
 
-int smu_get_power_profile_mode(void *handle, char *buf)
+static int smu_get_power_profile_mode(void *handle, char *buf)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2402,7 +2408,9 @@ int smu_get_power_profile_mode(void *handle, char *buf)
        return ret;
 }
 
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
+static int smu_set_power_profile_mode(void *handle,
+                                     long *param,
+                                     uint32_t param_size)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2420,7 +2428,7 @@ int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
 }
 
 
-u32 smu_get_fan_control_mode(void *handle)
+static u32 smu_get_fan_control_mode(void *handle)
 {
        struct smu_context *smu = handle;
        u32 ret = 0;
@@ -2438,7 +2446,7 @@ u32 smu_get_fan_control_mode(void *handle)
        return ret;
 }
 
-int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
        int ret = 0;
 
@@ -2463,14 +2471,15 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
        return ret;
 }
 
-void smu_pp_set_fan_control_mode(void *handle, u32 value) {
+static void smu_pp_set_fan_control_mode(void *handle, u32 value)
+{
        struct smu_context *smu = handle;
 
        smu_set_fan_control_mode(smu, value);
 }
 
 
-int smu_get_fan_speed_percent(void *handle, u32 *speed)
+static int smu_get_fan_speed_percent(void *handle, u32 *speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2494,7 +2503,7 @@ int smu_get_fan_speed_percent(void *handle, u32 *speed)
        return ret;
 }
 
-int smu_set_fan_speed_percent(void *handle, u32 speed)
+static int smu_set_fan_speed_percent(void *handle, u32 speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2517,7 +2526,7 @@ int smu_set_fan_speed_percent(void *handle, u32 speed)
        return ret;
 }
 
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
+static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2538,8 +2547,9 @@ int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
        return ret;
 }
 
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2554,10 +2564,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
        return ret;
 }
 
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-                                      enum smu_clk_type clk_type,
-                                      struct pp_clock_levels_with_latency *clocks)
+static int smu_get_clock_by_type_with_latency(void *handle,
+                                             enum amd_pp_clock_type type,
+                                             struct pp_clock_levels_with_latency *clocks)
 {
+       struct smu_context *smu = handle;
+       enum smu_clk_type clk_type;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2565,17 +2577,38 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->get_clock_by_type_with_latency)
+       if (smu->ppt_funcs->get_clock_by_type_with_latency) {
+               switch (type) {
+               case amd_pp_sys_clock:
+                       clk_type = SMU_GFXCLK;
+                       break;
+               case amd_pp_mem_clock:
+                       clk_type = SMU_MCLK;
+                       break;
+               case amd_pp_dcef_clock:
+                       clk_type = SMU_DCEFCLK;
+                       break;
+               case amd_pp_disp_clock:
+                       clk_type = SMU_DISPCLK;
+                       break;
+               default:
+                       dev_err(smu->adev->dev, "Invalid clock type!\n");
+                       mutex_unlock(&smu->mutex);
+                       return -EINVAL;
+               }
+
                ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+       }
 
        mutex_unlock(&smu->mutex);
 
        return ret;
 }
 
-int smu_display_clock_voltage_request(struct smu_context *smu,
-                                     struct pp_display_clock_request *clock_req)
+static int smu_display_clock_voltage_request(void *handle,
+                                            struct pp_display_clock_request *clock_req)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2592,8 +2625,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 }
 
 
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+static int smu_display_disable_memory_clock_switch(void *handle,
+                                                  bool disable_memory_clock_switch)
 {
+       struct smu_context *smu = handle;
        int ret = -EINVAL;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2609,8 +2644,8 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
        return ret;
 }
 
-int smu_set_xgmi_pstate(void *handle,
-                       uint32_t pstate)
+static int smu_set_xgmi_pstate(void *handle,
+                              uint32_t pstate)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2631,49 +2666,7 @@ int smu_set_xgmi_pstate(void *handle,
        return ret;
 }
 
-int smu_set_azalia_d3_pme(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->set_azalia_d3_pme)
-               ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
-}
-
-/*
- * On system suspending or resetting, the dpm_enabled
- * flag will be cleared. So that those SMU services which
- * are not supported will be gated.
- *
- * However, the baco/mode1 reset should still be granted
- * as they are still supported and necessary.
- */
-bool smu_baco_is_support(struct smu_context *smu)
-{
-       bool ret = false;
-
-       if (!smu->pm_enabled)
-               return false;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
-               ret = smu->ppt_funcs->baco_is_support(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
-}
-
-int smu_get_baco_capability(void *handle, bool *cap)
+static int smu_get_baco_capability(void *handle, bool *cap)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2693,60 +2686,7 @@ int smu_get_baco_capability(void *handle, bool *cap)
        return ret;
 }
 
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
-{
-       if (smu->ppt_funcs->baco_get_state)
-               return -EINVAL;
-
-       mutex_lock(&smu->mutex);
-       *state = smu->ppt_funcs->baco_get_state(smu);
-       mutex_unlock(&smu->mutex);
-
-       return 0;
-}
-
-int smu_baco_enter(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->baco_enter)
-               ret = smu->ppt_funcs->baco_enter(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       if (ret)
-               dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
-
-       return ret;
-}
-
-int smu_baco_exit(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->baco_exit)
-               ret = smu->ppt_funcs->baco_exit(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       if (ret)
-               dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
-
-       return ret;
-}
-
-int smu_baco_set_state(void *handle, int state)
+static int smu_baco_set_state(void *handle, int state)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2831,7 +2771,7 @@ int smu_mode1_reset(struct smu_context *smu)
        return ret;
 }
 
-int smu_mode2_reset(void *handle)
+static int smu_mode2_reset(void *handle)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2852,9 +2792,10 @@ int smu_mode2_reset(void *handle)
        return ret;
 }
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-                                        struct pp_smu_nv_clock_table *max_clocks)
+static int smu_get_max_sustainable_clocks_by_dc(void *handle,
+                                               struct pp_smu_nv_clock_table *max_clocks)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2870,10 +2811,11 @@ int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
        return ret;
 }
 
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-                           unsigned int *clock_values_in_khz,
-                           unsigned int *num_states)
+static int smu_get_uclk_dpm_states(void *handle,
+                                  unsigned int *clock_values_in_khz,
+                                  unsigned int *num_states)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2889,7 +2831,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
        return ret;
 }
 
-enum amd_pm_state_type smu_get_current_power_state(void *handle)
+static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 {
        struct smu_context *smu = handle;
        enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
@@ -2907,9 +2849,10 @@ enum amd_pm_state_type smu_get_current_power_state(void *handle)
        return pm_state;
 }
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-                           struct dpm_clocks *clock_table)
+static int smu_get_dpm_clock_table(void *handle,
+                                  struct dpm_clocks *clock_table)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2925,7 +2868,7 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
        return ret;
 }
 
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
+static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
        struct smu_context *smu = handle;
        ssize_t size;
@@ -2945,7 +2888,7 @@ ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
        return size;
 }
 
-int smu_enable_mgpu_fan_boost(void *handle)
+static int smu_enable_mgpu_fan_boost(void *handle)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2963,8 +2906,10 @@ int smu_enable_mgpu_fan_boost(void *handle)
        return ret;
 }
 
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
+static int smu_gfx_state_change_set(void *handle,
+                                   uint32_t state)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        mutex_lock(&smu->mutex);
@@ -3026,4 +2971,31 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
        .get_power_profile_mode  = smu_get_power_profile_mode,
        .force_clock_level       = smu_force_ppclk_levels,
        .print_clock_levels      = smu_print_ppclk_levels,
+       .get_uclk_dpm_states     = smu_get_uclk_dpm_states,
+       .get_dpm_clock_table     = smu_get_dpm_clock_table,
+       .display_configuration_change        = smu_display_configuration_change,
+       .get_clock_by_type_with_latency      = smu_get_clock_by_type_with_latency,
+       .display_clock_voltage_request       = smu_display_clock_voltage_request,
+       .set_active_display_count            = smu_set_display_count,
+       .set_min_deep_sleep_dcefclk          = smu_set_deep_sleep_dcefclk,
+       .set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
+       .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
+       .get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
+       .load_firmware           = smu_load_microcode,
+       .gfx_state_change_set    = smu_gfx_state_change_set,
 };
+
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+                      uint64_t event_arg)
+{
+       int ret = -EINVAL;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->ppt_funcs->wait_for_event) {
+               mutex_lock(&smu->mutex);
+               ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
+               mutex_unlock(&smu->mutex);
+       }
+
+       return ret;
+}
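
With every entry point now static, the only way into this file is the const swsmu_pm_funcs table above, which hands each callback an opaque void *handle. A standalone sketch of that ops-table pattern, with assumed names:

#include <stdio.h>
#include <stdint.h>

struct pm_funcs {
        int (*set_power_limit)(void *handle, uint32_t limit);
        uint32_t (*get_mclk)(void *handle, int low);
};

struct smu_ctx { /* stand-in for struct smu_context */
        uint32_t power_limit;
        uint32_t mclk_low, mclk_high;
};

static int set_power_limit(void *handle, uint32_t limit)
{
        struct smu_ctx *smu = handle; /* recover the typed context */

        smu->power_limit = limit;
        return 0;
}

static uint32_t get_mclk(void *handle, int low)
{
        struct smu_ctx *smu = handle;

        return low ? smu->mclk_low : smu->mclk_high;
}

/* const table is the sole public surface; implementations stay static */
static const struct pm_funcs funcs = {
        .set_power_limit = set_power_limit,
        .get_mclk = get_mclk,
};

int main(void)
{
        struct smu_ctx smu = { .mclk_low = 400, .mclk_high = 1000 };
        void *handle = &smu;

        funcs.set_power_limit(handle, 200);
        printf("limit=%u mclk=%u\n", smu.power_limit,
               funcs.get_mclk(handle, 0));
        return 0;
}
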
index bbc03092b0a9ba57580f0afe68c12b6e8c9e3f03..77693bf0840cfc9ad8fcebb578a63cb688f6dc98 100644 (file)
@@ -2365,6 +2365,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .get_fan_parameters = arcturus_get_fan_parameters,
        .interrupt_work = smu_v11_0_interrupt_work,
        .set_light_sbr = smu_v11_0_set_light_sbr,
+       .set_mp1_state = smu_cmn_set_mp1_state,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
index 3d0de2c958fa8a7e1d120f2406618ffeeca4101c..f827096dc849a0079f7ac0ab22437389eeeffc97 100644 (file)
@@ -431,6 +431,30 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
        return 0;
 }
 
+static int navi10_set_mp1_state(struct smu_context *smu,
+                               enum pp_mp1_state mp1_state)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t mp1_fw_flags;
+       int ret = 0;
+
+       ret = smu_cmn_set_mp1_state(smu, mp1_state);
+       if (ret)
+               return ret;
+
+       if (mp1_state == PP_MP1_STATE_UNLOAD) {
+               mp1_fw_flags = RREG32_PCIE(MP1_Public |
+                                          (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+               mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
+
+               WREG32_PCIE(MP1_Public |
+                           (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
+       }
+
+       return 0;
+}
+
 static int navi10_setup_pptable(struct smu_context *smu)
 {
        int ret = 0;
@@ -3031,6 +3055,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .get_fan_parameters = navi10_get_fan_parameters,
        .post_init = navi10_post_smu_init,
        .interrupt_work = smu_v11_0_interrupt_work,
+       .set_mp1_state = navi10_set_mp1_state,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
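
On PP_MP1_STATE_UNLOAD the new callback clears the interrupts-enabled bit in the MP1 firmware flags with a read-modify-write. A standalone sketch of that bit clear (register access simulated, mask bit assumed for illustration):

#include <stdio.h>
#include <stdint.h>

#define FW_FLAGS_INTERRUPTS_ENABLED_MASK 0x00000001u /* assumed bit */

static uint32_t fake_reg = 0x0000000du; /* pretend firmware flags */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

int main(void)
{
        uint32_t flags = reg_read();                /* read   */
        flags &= ~FW_FLAGS_INTERRUPTS_ENABLED_MASK; /* modify */
        reg_write(flags);                           /* write  */
        printf("flags: 0x%08x\n", reg_read());
        return 0;
}
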
index 3621884c32935d4ac72919af701629e4934a0094..72d9c1be1835c258b02ad5c41d99dbc5a521e417 100644 (file)
@@ -3110,6 +3110,23 @@ static int sienna_cichlid_system_features_control(struct smu_context *smu,
        return smu_v11_0_system_features_control(smu, en);
 }
 
+static int sienna_cichlid_set_mp1_state(struct smu_context *smu,
+                                       enum pp_mp1_state mp1_state)
+{
+       int ret;
+
+       switch (mp1_state) {
+       case PP_MP1_STATE_UNLOAD:
+               ret = smu_cmn_set_mp1_state(smu, mp1_state);
+               break;
+       default:
+               /* Ignore others */
+               ret = 0;
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
        .set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -3195,6 +3212,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .get_fan_parameters = sienna_cichlid_get_fan_parameters,
        .interrupt_work = smu_v11_0_interrupt_work,
        .gpo_control = sienna_cichlid_gpo_control,
+       .set_mp1_state = sienna_cichlid_set_mp1_state,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
index 0d137af1a78aca4360809f4519736e19cd52fb9d..6274cae4a065e415f156be6619c2cd5e9647dec7 100644 (file)
@@ -561,6 +561,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
                smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
                break;
        case 3:
+       case 4:
        default:
                v_3_3 = (struct atom_firmware_info_v3_3 *)header;
                smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
index c5b387360f019cff9d6c2f43e77e46d6d06e8b8d..7bcd35840bf27b5be7ded2db32cfcd43d2169025 100644 (file)
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
        ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
        if (ret)
@@ -1889,6 +1894,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
        .get_ppt_limit = vangogh_get_ppt_limit,
        .get_power_limit = vangogh_get_power_limit,
        .set_power_limit = vangogh_set_power_limit,
+       .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
 };
 
 void vangogh_set_ppt_funcs(struct smu_context *smu)
index 9813a86ca31a79530b876df2dbbc574857d17eb8..bca02a9fb489eb202adaf227a83d621cac4348dc 100644 (file)
@@ -126,7 +126,8 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
        MSG_MAP(SetExecuteDMATest,                   PPSMC_MSG_SetExecuteDMATest,               0),
        MSG_MAP(EnableDeterminism,                   PPSMC_MSG_EnableDeterminism,               0),
        MSG_MAP(DisableDeterminism,                  PPSMC_MSG_DisableDeterminism,              0),
-       MSG_MAP(SetUclkDpmMode,                          PPSMC_MSG_SetUclkDpmMode,              0),
+       MSG_MAP(SetUclkDpmMode,                      PPSMC_MSG_SetUclkDpmMode,                  0),
+       MSG_MAP(GfxDriverResetRecovery,              PPSMC_MSG_GfxDriverResetRecovery,          0),
 };
 
 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -1265,6 +1266,233 @@ static bool aldebaran_is_dpm_running(struct smu_context *smu)
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
+static void aldebaran_fill_i2c_req(SwI2cRequest_t  *req, bool write,
+                                 uint8_t address, uint32_t numbytes,
+                                 uint8_t *data)
+{
+       int i;
+
+       req->I2CcontrollerPort = 0;
+       req->I2CSpeed = 2;
+       req->SlaveAddress = address;
+       req->NumCmds = numbytes;
+
+       for (i = 0; i < numbytes; i++) {
+               SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];
+
+               /* First 2 bytes are always a write: they carry the lower 2 bytes of the EEPROM address */
+               if (i < 2)
+                       cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
+               else
+                       cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
+
+               /* Add RESTART for read after the address bytes are filled */
+               cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+               /* Add STOP in the end */
+               cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+               /* Fill with data regardless of read or write to simplify code */
+               cmd->ReadWriteData = data[i];
+       }
+}
+
+static int aldebaran_i2c_read_data(struct i2c_adapter *control,
+                                              uint8_t address,
+                                              uint8_t *data,
+                                              uint32_t numbytes)
+{
+       uint32_t  i, ret = 0;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct smu_table *table = &smu_table->driver_table;
+
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
+       memset(&req, 0, sizeof(req));
+       aldebaran_fill_i2c_req(&req, false, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       /* Now read data starting with that address */
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+                                       true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+               /* Assume SMU fills res->SwI2cCmds[i].ReadWriteData with read bytes */
+               for (i = 0; i < numbytes; i++)
+                       data[i] = res->SwI2cCmds[i].ReadWriteData;
+
+               dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :",
+                                 (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+       } else
+               dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret);
+
+       return ret;
+}
+
+static int aldebaran_i2c_write_data(struct i2c_adapter *control,
+                                               uint8_t address,
+                                               uint8_t *data,
+                                               uint32_t numbytes)
+{
+       uint32_t ret;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
+       memset(&req, 0, sizeof(req));
+       aldebaran_fill_i2c_req(&req, true, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ",
+                                        (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+               /*
+                * According to the EEPROM spec, a maximum of 10 ms is required for
+                * the EEPROM to flush its internal RX buffer after STOP is issued at
+                * the end of a write transaction. During this time the EEPROM will
+                * not respond to any more commands - so wait a bit longer.
+                */
+               msleep(10);
+
+       } else
+               dev_err(adev->dev, "aldebaran_i2c_write- error occurred :%x", ret);
+
+       return ret;
+}
+
+static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
+                             struct i2c_msg *msgs, int num)
+{
+       uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+       uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+       for (i = 0; i < num; i++) {
+               /*
+                * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data
+                * at once, hence the data needs to be split into chunks and each
+                * chunk sent separately.
+                */
+               data_size = msgs[i].len - 2;
+               data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+               next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+               data_ptr = msgs[i].buf + 2;
+
+               for (j = 0; j < data_size / data_chunk_size; j++) {
+                       /* Insert the EEPROM dest address, bits 0-15 */
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = aldebaran_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, MAX_SW_I2C_COMMANDS);
+
+                               memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+                       } else {
+
+                               memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+                               ret = aldebaran_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, MAX_SW_I2C_COMMANDS);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+
+                       next_eeprom_addr += data_chunk_size;
+                       data_ptr += data_chunk_size;
+               }
+
+               if (data_size % data_chunk_size) {
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = aldebaran_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, (data_size % data_chunk_size) + 2);
+
+                               memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+                       } else {
+                               memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+                               ret = aldebaran_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, (data_size % data_chunk_size) + 2);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+               }
+       }
+
+fail:
+       return num;
+}
+
+static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm aldebaran_i2c_algo = {
+       .master_xfer = aldebaran_i2c_xfer,
+       .functionality = aldebaran_i2c_func,
+};
+
+static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int res;
+
+       control->owner = THIS_MODULE;
+       control->class = I2C_CLASS_SPD;
+       control->dev.parent = &adev->pdev->dev;
+       control->algo = &aldebaran_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+       res = i2c_add_adapter(control);
+       if (res)
+               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+       return res;
+}
+
+static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+       i2c_del_adapter(control);
+}
+
 static void aldebaran_get_unique_id(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -1432,6 +1660,57 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
        return sizeof(struct gpu_metrics_v1_1);
 }
 
+static int aldebaran_mode2_reset(struct smu_context *smu)
+{
+       u32 smu_version;
+       int ret = 0, index;
+       struct amdgpu_device *adev = smu->adev;
+       int timeout = 10;
+
+       smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+       index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+                                               SMU_MSG_GfxDeviceDriverReset);
+
+       mutex_lock(&smu->message_lock);
+       if (smu_version >= 0x00441400) {
+               ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+               /* This is similar to FLR, wait till max FLR timeout */
+               msleep(100);
+               dev_dbg(smu->adev->dev, "restore config space...\n");
+               /* Restore the config space saved during init */
+               amdgpu_device_load_pci_state(adev->pdev);
+
+               dev_dbg(smu->adev->dev, "wait for reset ack\n");
+               while (ret == -ETIME && timeout)  {
+                       ret = smu_cmn_wait_for_response(smu);
+                       /* Wait a bit more time for getting ACK */
+                       if (ret == -ETIME) {
+                               --timeout;
+                               usleep_range(500, 1000);
+                               continue;
+                       }
+
+                       if (ret != 1) {
+                               dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+                                               SMU_RESET_MODE_2, ret);
+                               goto out;
+                       }
+               }
+
+       } else {
+               dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
+                               smu_version);
+       }
+
+       if (ret == 1)
+               ret = 0;
+out:
+       mutex_unlock(&smu->message_lock);
+
+       return ret;
+}
+
 static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 {
 #if 0
@@ -1460,6 +1739,19 @@ static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
        return true;
 }
 
+static int aldebaran_set_mp1_state(struct smu_context *smu,
+                                  enum pp_mp1_state mp1_state)
+{
+       switch (mp1_state) {
+       case PP_MP1_STATE_UNLOAD:
+               return smu_cmn_set_mp1_state(smu, mp1_state);
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pptable_funcs aldebaran_ppt_funcs = {
        /* init dpm */
        .get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
@@ -1517,7 +1809,11 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
        .mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
        .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
        .mode1_reset = smu_v13_0_mode1_reset,
-       .mode2_reset = smu_v13_0_mode2_reset,
+       .set_mp1_state = aldebaran_set_mp1_state,
+       .mode2_reset = aldebaran_mode2_reset,
+       .wait_for_event = smu_v13_0_wait_for_event,
+       .i2c_init = aldebaran_i2c_control_init,
+       .i2c_fini = aldebaran_i2c_control_fini,
 };
 
 void aldebaran_set_ppt_funcs(struct smu_context *smu)
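
aldebaran_i2c_xfer() splits each message because one SMU request carries at most MAX_SW_I2C_COMMANDS bytes, two of which are reserved for the 16-bit EEPROM address. A standalone sketch of the chunking arithmetic (the MAX_SW_I2C_COMMANDS value here is assumed for illustration):

#include <stdio.h>
#include <stdint.h>

#define MAX_SW_I2C_COMMANDS 24 /* value assumed for illustration */

int main(void)
{
        uint32_t msg_len = 2 + 50;   /* 2 address bytes + 50 data bytes */
        uint32_t data_size = msg_len - 2;
        uint32_t chunk = MAX_SW_I2C_COMMANDS - 2; /* payload per request */
        uint32_t addr = 0x0100, j;

        /* full chunks, each prefixed with the current EEPROM address */
        for (j = 0; j < data_size / chunk; j++) {
                printf("full chunk: eeprom addr 0x%04x, %u bytes\n",
                       addr, chunk);
                addr += chunk;
        }
        /* remainder, same 2-byte address header */
        if (data_size % chunk)
                printf("tail chunk: eeprom addr 0x%04x, %u bytes\n",
                       addr, data_size % chunk);
        return 0;
}

With 50 payload bytes and 22 usable bytes per request this prints two full chunks and a 6-byte tail, matching the quotient/remainder loop in the driver.
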
index bd3a9c89dc440c963730601d0b3494b14e79ed60..30c9ac635105dba577ceefc97ceab8f60c945005 100644 (file)
@@ -72,8 +72,8 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
 
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
@@ -1374,19 +1374,43 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
        return ret;
 }
 
-int smu_v13_0_mode2_reset(struct smu_context *smu)
+static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
+                                            uint64_t event_arg)
 {
-       u32 smu_version;
        int ret = 0;
-       struct amdgpu_device *adev = smu->adev;
-       smu_cmn_get_smc_version(smu, NULL, &smu_version);
-       if (smu_version >= 0x00440700)
-               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
-       else
-               dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n", smu_version);
-       /*TODO: mode2 reset wait time should be shorter, will modify it later*/
+
+       dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+       return ret;
+}
+
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+                            uint64_t event_arg)
+{
+       int ret = -EINVAL;
+
+       switch (event) {
+       case SMU_EVENT_RESET_COMPLETE:
+               ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+int smu_v13_0_mode2_reset(struct smu_context *smu)
+{
+       int ret;
+
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+                       SMU_RESET_MODE_2, NULL);
+       /* TODO: mode2 reset wait time should be shorter; add an ASIC-specific func if required */
        if (!ret)
                msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
        return ret;
 }
 
@@ -1686,10 +1710,14 @@ int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
                                  enum smu_clk_type clk_type,
                                  uint32_t *value)
 {
-       return smu_v13_0_get_dpm_freq_by_index(smu,
-                                              clk_type,
-                                              0xff,
-                                              value);
+       int ret;
+
+       ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+       /* FW returns 0 based max level, increment by one */
+       /* FW returns the 0-based max level, so increment by one */
+               ++(*value);
+
+       return ret;
 }
 
 int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
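
The get_dpm_level_count() change converts the firmware's 0-based maximum level index into a level count. A tiny standalone sketch of that conversion (firmware reply simulated):

#include <stdio.h>
#include <stdint.h>

/* stand-in: index 0xff queries the max level index; here FW says "2",
 * i.e. levels 0, 1 and 2 exist */
static int fw_get_dpm_freq_by_index(uint32_t index, uint32_t *value)
{
        *value = (index == 0xff) ? 2 : 0;
        return 0;
}

static int get_dpm_level_count(uint32_t *value)
{
        int ret = fw_get_dpm_freq_by_index(0xff, value);

        /* mirror the driver's check: only adjust on success */
        if (!ret && value)
                ++(*value); /* max index 2 -> 3 levels */
        return ret;
}

int main(void)
{
        uint32_t count;

        if (!get_dpm_level_count(&count))
                printf("dpm levels: %u\n", count); /* prints 3 */
        return 0;
}
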
index 4b45953b36d813ac7cae97b5ff6d0a687faeeda6..dc7d2e71aa6fd9265a577287836fc172c4d6a002 100644 (file)
@@ -76,10 +76,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
        *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
-static int smu_cmn_wait_for_response(struct smu_context *smu)
+int smu_cmn_wait_for_response(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
-       uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+       uint32_t cur_value, i, timeout = adev->usec_timeout * 20;
 
        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
@@ -780,3 +780,31 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        header->structure_size = structure_size;
 
 }
+
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+                         enum pp_mp1_state mp1_state)
+{
+       enum smu_message_type msg;
+       int ret;
+
+       switch (mp1_state) {
+       case PP_MP1_STATE_SHUTDOWN:
+               msg = SMU_MSG_PrepareMp1ForShutdown;
+               break;
+       case PP_MP1_STATE_UNLOAD:
+               msg = SMU_MSG_PrepareMp1ForUnload;
+               break;
+       case PP_MP1_STATE_RESET:
+               msg = SMU_MSG_PrepareMp1ForReset;
+               break;
+       case PP_MP1_STATE_NONE:
+       default:
+               return 0;
+       }
+
+       ret = smu_cmn_send_smc_msg(smu, msg, NULL);
+       if (ret)
+               dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+
+       return ret;
+}
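
smu_cmn_wait_for_response() polls the response register in a bounded loop, and this series doubles the budget from 10x to 20x adev->usec_timeout. A standalone sketch of such bounded polling (register and delay simulated):

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

static unsigned int tick;

/* pretend the firmware answers after 15 polls */
static uint32_t read_resp(void)
{
        return (++tick >= 15) ? 1 : 0;
}

static int wait_for_response(uint32_t usec_timeout)
{
        uint32_t i, timeout = usec_timeout * 20; /* was * 10 before */

        for (i = 0; i < timeout; i++) {
                uint32_t cur = read_resp();

                if (cur != 0)
                        return (int)cur; /* response code */
                /* real code delays ~1us per iteration here */
        }
        return -ETIME; /* as smu_cmn does */
}

int main(void)
{
        printf("resp = %d after %u polls\n", wait_for_response(100), tick);
        return 0;
}
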
index c6925018557555c91e4efc8ab4ee3263f07b52b8..da6ff6f024f91bf53c6a1a5d292793ed46cf1fb5 100644 (file)
@@ -37,6 +37,8 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg);
 
+int smu_cmn_wait_for_response(struct smu_context *smu);
+
 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index);
@@ -99,5 +101,8 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
 
 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+                         enum pp_mp1_state mp1_state);
+
 #endif
 #endif
index 5b4547e0f775f97312630a3b1ef1d9261ba16281..dd9ed000ad4c308709e356ee503349f6f53af2f6 100644 (file)
@@ -1183,7 +1183,7 @@ EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
  * This function walks the current configuration and adds all connectors
  * currently using @crtc to the atomic configuration @state. Note that this
  * function must acquire the connection mutex. This can potentially cause
- * unneeded seralization if the update is just for the planes on one CRTC. Hence
+ * unneeded serialization if the update is just for the planes on one CRTC. Hence
  * drivers and helpers should only call this when really needed (e.g. when a
  * full modeset needs to happen due to some change).
  *
@@ -1248,7 +1248,7 @@ EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  *
  * Since acquiring a plane state will always also acquire the w/w mutex of the
  * current CRTC for that plane (if there is any) adding all the plane states for
- * a CRTC will not reduce parallism of atomic updates.
+ * a CRTC will not reduce parallelism of atomic updates.
  *
  * Returns:
  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
index a0459b168d538c95ecf2fbd226b6fc8528d8a5ff..7444dc0e0c0e25ed09745ca9850cf0d253ddec62 100644 (file)
@@ -2569,6 +2569,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
                       rdev->me_fw->size, fw_name);
                err = -EINVAL;
+               goto out;
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
@@ -2579,6 +2580,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                pr_err("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
                       rdev->rlc_fw->size, fw_name);
                err = -EINVAL;
+               goto out;
        }
 
        if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
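
The r600 fix adds the missing jumps so that a bogus firmware length actually aborts instead of leaving err set while loading continues. A standalone sketch of the goto-based cleanup pattern being repaired:

#include <stdio.h>

static int load_blob(const char *name, int bogus)
{
        printf("loading %s\n", name);
        return bogus ? -22 : 0; /* -EINVAL on bogus length */
}

static int init_microcode(void)
{
        int err;

        err = load_blob("me", 1);
        if (err)
                goto out; /* without this goto, rlc would still load */

        err = load_blob("rlc", 0);
        if (err)
                goto out;
out:
        return err;
}

int main(void)
{
        printf("init_microcode: %d\n", init_microcode());
        return 0;
}
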
index 34b7c6f1647985e5138dbbbe90a7ba8c0f5294ed..8be4799a98eff701e4f378423b66e1055c31db9a 100644 (file)
@@ -38,7 +38,7 @@ extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes,
 
 
 struct r600_cs_track {
-       /* configuration we miror so that we use same code btw kms/ums */
+       /* configuration we mirror so that we use same code btw kms/ums */
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
@@ -963,7 +963,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
  *
  * This function will test against r600_reg_safe_bm and return 0
  * if register is safe. If register is not flag as safe this function
- * will test it against a list of register needind special handling.
+ * will test it against a list of register needing special handling.
  */
 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
@@ -2336,7 +2336,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
 /**
  * r600_dma_cs_next_reloc() - parse next reloc
  * @p:         parser structure holding parsing context.
- * @cs_reloc:          reloc informations
+ * @cs_reloc:          reloc information
  *
  * Return the next reloc, do bo validation and compute
  * GPU offset using the provided start.
index 66a0e736cc4daa04a7083c46c651d3540dfcdcb5..59cf1d288465a1741a346f12315e717b3aa9487c 100644 (file)
@@ -241,6 +241,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
                to_radeon_connector(connector);
        struct radeon_connector *master = radeon_connector->mst_port;
 
+       if (drm_connector_is_unregistered(connector))
+               return connector_status_disconnected;
+
        return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
                                      radeon_connector->port);
 }
index 804f7a427be7a4324b91f538affadf26c30b6350..cee11c55fd156914d050979e48dba217b9e1c500 100644 (file)
@@ -380,6 +380,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
        }
 #endif
        man = ttm_manager_type(bdev, TTM_PL_VRAM);
+       if (!man)
+               return 0;
        return ttm_resource_manager_evict_all(bdev, man);
 }
 
index b6737be9165cf4f5ac200a48e3badc9d1b579ce4..0c1950f4e146f158f00d2cc879c5bac3e07d0325 100644 (file)
@@ -360,11 +360,10 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        int cp = rdev->pm.profile;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (cp == PM_PROFILE_AUTO) ? "auto" :
-                       (cp == PM_PROFILE_LOW) ? "low" :
-                       (cp == PM_PROFILE_MID) ? "mid" :
-                       (cp == PM_PROFILE_HIGH) ? "high" : "default");
+       return sysfs_emit(buf, "%s\n", (cp == PM_PROFILE_AUTO) ? "auto" :
+                         (cp == PM_PROFILE_LOW) ? "low" :
+                         (cp == PM_PROFILE_MID) ? "mid" :
+                         (cp == PM_PROFILE_HIGH) ? "high" : "default");
 }
 
 static ssize_t radeon_set_pm_profile(struct device *dev,
@@ -415,9 +414,8 @@ static ssize_t radeon_get_pm_method(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        int pm = rdev->pm.pm_method;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == PM_METHOD_DYNPM) ? "dynpm" :
-                       (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
+       return sysfs_emit(buf, "%s\n", (pm == PM_METHOD_DYNPM) ? "dynpm" :
+                         (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
 }
 
 static ssize_t radeon_set_pm_method(struct device *dev,
@@ -472,9 +470,9 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
-                       (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+       return sysfs_emit(buf, "%s\n",
+                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
 static ssize_t radeon_set_dpm_state(struct device *dev,
@@ -518,11 +516,11 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
 
        if  ((rdev->flags & RADEON_IS_PX) &&
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-               return snprintf(buf, PAGE_SIZE, "off\n");
+               return sysfs_emit(buf, "off\n");
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-                       (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
+       return sysfs_emit(buf, "%s\n",
+                         (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+                         (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
 }
 
 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
@@ -685,7 +683,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
        else
                temp = 0;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
@@ -701,7 +699,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
        else
                temp = rdev->pm.dpm.thermal.max_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
@@ -731,7 +729,7 @@ static ssize_t radeon_hwmon_show_sclk(struct device *dev,
           for hwmon */
        sclk *= 10000;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", sclk);
+       return sysfs_emit(buf, "%u\n", sclk);
 }
 
 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, radeon_hwmon_show_sclk, NULL,
@@ -752,7 +750,7 @@ static ssize_t radeon_hwmon_show_vddc(struct device *dev,
        if (rdev->asic->dpm.get_current_vddc)
                vddc = rdev->asic->dpm.get_current_vddc(rdev);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", vddc);
+       return sysfs_emit(buf, "%u\n", vddc);
 }
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, radeon_hwmon_show_vddc, NULL,
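Every hunk in this file is the same mechanical conversion: sysfs_emit() knows that a sysfs show() buffer is exactly PAGE_SIZE and page-aligned, so the caller no longer passes a size it could get wrong, and misuse outside sysfs triggers a warning instead of silently truncating. The before/after shape, as a sketch:

        /* before: caller must remember the buffer is PAGE_SIZE */
        return snprintf(buf, PAGE_SIZE, "%d\n", temp);

        /* after: sysfs_emit() checks that buf is page-aligned and
         * bounds the write to PAGE_SIZE internally */
        return sysfs_emit(buf, "%d\n", temp);
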
index 476ce9c24b9f50e0ea09d58dae009a5ea94e7bc8..380b3007fd0b0de66111caea4183e37212e11a50 100644 (file)
@@ -360,7 +360,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm
        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /* check that we only pin down anonymous memory
                   to prevent problems with writeback */
-               unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+               unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
@@ -382,7 +382,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm
        } while (pinned < ttm->num_pages);
 
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
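The (u64) casts in the two hunks above fix the same class of bug: ttm->num_pages is only 32 bits wide, so shifting it by PAGE_SHIFT is done in 32-bit arithmetic and can wrap before the result is widened. A worked illustration with hypothetical values, assuming 4 KiB pages (PAGE_SHIFT == 12):

        u32 num_pages = 0x200000;                   /* 2M pages = 8 GiB */
        u64 bad  = num_pages << PAGE_SHIFT;         /* shifted in 32 bits: wraps to 0 */
        u64 good = (u64)num_pages << PAGE_SHIFT;    /* widened first: 0x200000000 */
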
@@ -415,7 +415,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
        /* double check that we don't free the table twice */
-       if (!ttm->sg->sgl)
+       if (!ttm->sg || !ttm->sg->sgl)
                return;
 
        /* free the sg table and pages again */
@@ -481,13 +481,14 @@ static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *tt
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct radeon_device *rdev = radeon_get_rdev(bdev);
 
+       if (gtt->userptr)
+               radeon_ttm_tt_unpin_userptr(bdev, ttm);
+
        if (!gtt->bound)
                return;
 
        radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
 
-       if (gtt->userptr)
-               radeon_ttm_tt_unpin_userptr(bdev, ttm);
        gtt->bound = false;
 }
 
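The reordering in radeon_ttm_backend_unbind() and the new '!ttm->sg' test in the unpin path above go together: once unbind releases userptr pages unconditionally, even for a tt that was never bound into the GART, unpin can be reached before the sg table was ever allocated, so it has to tolerate a NULL ttm->sg. With the old order, an unbound userptr tt returned early and its pinned pages leaked.
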
index d82a7ebf6099e6e968cd4b7316e9cf87ca3a7107..92d8de24d0a175afd177f8c1fa34170cd0c51228 100644 (file)
@@ -361,40 +361,16 @@ static void drm_sched_job_timedout(struct work_struct *work)
   */
 void drm_sched_increase_karma(struct drm_sched_job *bad)
 {
-       int i;
-       struct drm_sched_entity *tmp;
-       struct drm_sched_entity *entity;
-       struct drm_gpu_scheduler *sched = bad->sched;
-
-       /* don't increase @bad's karma if it's from KERNEL RQ,
-        * because sometimes GPU hang would cause kernel jobs (like VM updating jobs)
-        * corrupt but keep in mind that kernel jobs always considered good.
-        */
-       if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
-               atomic_inc(&bad->karma);
-               for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
-                    i++) {
-                       struct drm_sched_rq *rq = &sched->sched_rq[i];
-
-                       spin_lock(&rq->lock);
-                       list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
-                               if (bad->s_fence->scheduled.context ==
-                                   entity->fence_context) {
-                                       if (atomic_read(&bad->karma) >
-                                           bad->sched->hang_limit)
-                                               if (entity->guilty)
-                                                       atomic_set(entity->guilty, 1);
-                                       break;
-                               }
-                       }
-                       spin_unlock(&rq->lock);
-                       if (&entity->list != &rq->entities)
-                               break;
-               }
-       }
+       drm_sched_increase_karma_ext(bad, 1);
 }
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
+void drm_sched_reset_karma(struct drm_sched_job *bad)
+{
+       drm_sched_increase_karma_ext(bad, 0);
+}
+EXPORT_SYMBOL(drm_sched_reset_karma);
+
 /**
  * drm_sched_stop - stop the scheduler
  *
@@ -533,15 +509,32 @@ EXPORT_SYMBOL(drm_sched_start);
  *
  */
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
+{
+       drm_sched_resubmit_jobs_ext(sched, INT_MAX);
+}
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
+
+/**
+ * drm_sched_resubmit_jobs_ext - helper to relaunch a given number of jobs from the pending list
+ *
+ * @sched: scheduler instance
+ * @max: maximum number of jobs to relaunch
+ *
+ */
+void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
 {
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;
        struct dma_fence *fence;
+       int i = 0;
 
        list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
 
+               if (i >= max)
+                       break;
+
                if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
                        found_guilty = true;
                        guilty_context = s_job->s_fence->scheduled.context;
@@ -552,6 +545,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 
                dma_fence_put(s_job->s_fence->parent);
                fence = sched->ops->run_job(s_job);
+               i++;
 
                if (IS_ERR_OR_NULL(fence)) {
                        if (IS_ERR(fence))
@@ -563,7 +557,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
                }
        }
 }
-EXPORT_SYMBOL(drm_sched_resubmit_jobs);
+EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
 
 /**
  * drm_sched_job_init - init a scheduler job
@@ -903,3 +897,48 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
        sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
+
+/**
+ * drm_sched_increase_karma_ext - update a job's karma and its entity's guilty flag
+ *
+ * @bad: the job guilty of timing out
+ * @type: 1 to increase @bad's karma, 0 to reset it
+ *
+ */
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
+{
+       int i;
+       struct drm_sched_entity *tmp;
+       struct drm_sched_entity *entity;
+       struct drm_gpu_scheduler *sched = bad->sched;
+
+       /* don't change @bad's karma if it's from the KERNEL run queue:
+        * a GPU hang can corrupt kernel jobs (like VM updating jobs),
+        * but kernel jobs are always considered good.
+        */
+       if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+               if (type == 0)
+                       atomic_set(&bad->karma, 0);
+               else if (type == 1)
+                       atomic_inc(&bad->karma);
+
+               for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
+                    i++) {
+                       struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+                       spin_lock(&rq->lock);
+                       list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+                               if (bad->s_fence->scheduled.context ==
+                                   entity->fence_context) {
+                                       if (entity->guilty)
+                                               atomic_set(entity->guilty, type);
+                                       break;
+                               }
+                       }
+                       spin_unlock(&rq->lock);
+                       if (&entity->list != &rq->entities)
+                               break;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_sched_increase_karma_ext);
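As implemented above, the exported wrappers pin down the @type encoding; in sketch form:

        drm_sched_increase_karma_ext(bad, 0);   /* == drm_sched_reset_karma(bad) */
        drm_sched_increase_karma_ext(bad, 1);   /* == drm_sched_increase_karma(bad) */

Note that the hang-limit comparison that previously gated the guilty flag is gone from this helper, apparently leaving that policy to callers; drm_sched_resubmit_jobs_ext() still compares against sched->hang_limit when picking the guilty context.
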
index f888b5e9583ae2efce4c75a484acff8e93be66aa..10225a0a35d0a442b0128ed859e5d8af4231ae89 100644 (file)
@@ -322,7 +322,10 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
 void drm_sched_increase_karma(struct drm_sched_job *bad);
+void drm_sched_reset_karma(struct drm_sched_job *bad);
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
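
Taken together, these entry points let a driver isolate a bad job during reset instead of blaming a whole context wholesale. A hedged sketch of such a recovery loop; the driver-side names (pending_count, ring_test_ok()) are hypothetical and not part of this patch:

        /* Hypothetical TDR loop: relaunch pending jobs one at a time and
         * re-test the ring, so only the job that hangs again is punished.
         */
        drm_sched_reset_karma(bad);                 /* withdraw the blanket verdict */
        for (i = 0; i < pending_count; i++) {
                drm_sched_resubmit_jobs_ext(sched, 1);  /* run a single job */
                if (!ring_test_ok(ring))                /* assumed helper */
                        drm_sched_increase_karma_ext(bad, 1);
        }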