Merge tag 'drm-intel-next-fixes-2018-12-27' of git://anongit.freedesktop.org/drm...
authorDave Airlie <airlied@redhat.com>
Fri, 28 Dec 2018 04:05:35 +0000 (14:05 +1000)
committerDave Airlie <airlied@redhat.com>
Fri, 28 Dec 2018 04:05:46 +0000 (14:05 +1000)
GVT fixes for v4.21-rc1

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87imzfwh73.fsf@intel.com
378 files changed:
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/devicetree/bindings/display/msm/gpu.txt
Documentation/devicetree/bindings/display/msm/mdp4.txt
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/cik_ih.c
drivers/gpu/drm/amd/amdgpu/cz_ih.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/iceland_ih.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/si_ih.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dm_event_log.h
drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
drivers/gpu/drm/amd/display/dc/dm_services.h
drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
drivers/gpu/drm/amd/display/dc/inc/compressor.h
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h [new file with mode: 0644]
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/drm_damage_helper.c
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a2xx.xml.h
drivers/gpu/drm/msm/adreno/a2xx_gpu.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a2xx_gpu.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a3xx.xml.h
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx.xml.h
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx.xml.h
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/adreno/a6xx.xml.h
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h [new file with mode: 0644]
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
drivers/gpu/drm/msm/adreno/adreno_common.xml.h
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c [deleted file]
drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h [deleted file]
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c [deleted file]
drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h [deleted file]
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c [deleted file]
drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h [deleted file]
drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
drivers/gpu/drm/msm/edp/edp.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
drivers/gpu/drm/msm/hdmi/hdmi_phy.c
drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_trace.h [new file with mode: 0644]
drivers/gpu/drm/msm/msm_gpu_tracepoints.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_gpummu.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/msm/msm_mmu.h
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/msm/msm_ringbuffer.h
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv50/Kbuild
drivers/gpu/drm/nouveau/dispnv50/atom.h
drivers/gpu/drm/nouveau/dispnv50/base907c.c
drivers/gpu/drm/nouveau/dispnv50/core.c
drivers/gpu/drm/nouveau/dispnv50/core.h
drivers/gpu/drm/nouveau/dispnv50/corec37d.c
drivers/gpu/drm/nouveau/dispnv50/corec57d.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/dispnv50/curs.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/disp.h
drivers/gpu/drm/nouveau/dispnv50/head.c
drivers/gpu/drm/nouveau/dispnv50/head.h
drivers/gpu/drm/nouveau/dispnv50/head507d.c
drivers/gpu/drm/nouveau/dispnv50/head907d.c
drivers/gpu/drm/nouveau/dispnv50/headc37d.c
drivers/gpu/drm/nouveau/dispnv50/headc57d.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/dispnv50/lut.c
drivers/gpu/drm/nouveau/dispnv50/lut.h
drivers/gpu/drm/nouveau/dispnv50/wimm.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/dispnv50/wndw.h
drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/cla06f.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvif/clc36f.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_chan.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_debugfs.c
drivers/gpu/drm/nouveau/nouveau_dma.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_vmm.h
drivers/gpu/drm/nouveau/nvif/disp.c
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/selftests/test-drm_damage_helper.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/falcon.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/hub.h
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tegra/sor.h
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw_1x06.c
drivers/gpu/host1x/hw/host1x07.c [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x07.h [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x07_hardware.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x06_uclass.h
drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x07_uclass.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x07_vm.h [new file with mode: 0644]
drivers/gpu/host1x/hw/syncpt_hw.c
include/drm/gpu_scheduler.h
include/drm/ttm/ttm_execbuf_util.h
include/uapi/drm/msm_drm.h
include/uapi/linux/kfd_ioctl.h
include/video/samsung_fimd.h

index dfc743219bd88e4ab858470fac7d77752ff261a3..9ae94694272071281285be6e1185d5ad720ef68f 100644 (file)
@@ -106,6 +106,7 @@ Required properties:
 - clocks: Phandles to device clocks. See [1] for details on clock bindings.
 - clock-names: the following clocks are required:
   * "iface"
+  * "ref" (only required for new DTS files/entries)
   For 28nm HPM/LP, 28nm 8960 PHYs:
 - vddio-supply: phandle to vdd-io regulator device node
   For 20nm PHY:
index 43fac0fe09bbaf69bfbc1539ffbf095a3078bba8..ac8df3b871f900a0672f266a5b05635a7aca01cf 100644 (file)
@@ -1,11 +1,13 @@
 Qualcomm adreno/snapdragon GPU
 
 Required properties:
-- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno" or
+             "amd,imageon-XYZ.W", "amd,imageon"
     for example: "qcom,adreno-306.0", "qcom,adreno"
   Note that you need to list the less specific "qcom,adreno" (since this
   is what the device is matched on), in addition to the more specific
   with the chip-id.
+  If "amd,imageon" is used, there should be no top level msm device.
 - reg: Physical base address and length of the controller's registers.
 - interrupts: The interrupt signal from the gpu.
 - clocks: device clocks
index 3c341a15ccdc6a51ad3bf313d2b0ec9566a0d5f9..b07eeb38f709829894a65ed66d74628e36f0c0bf 100644 (file)
@@ -38,6 +38,8 @@ Required properties:
 Optional properties:
 - clock-names: the following clocks are optional:
   * "lut_clk"
+- qcom,lcdc-align-lsb: Boolean value indicating that LSB alignment should be
+  used for LCDC. This is only valid for 18bpp panels.
 
 Example:
 
index c8ad6bf6618adb0c38d9e415212c1b53b821c37e..88db3c263e5f066c09be77c6b57292ca4a19f35e 100644 (file)
@@ -82,6 +82,7 @@
 #include "amdgpu_bo_list.h"
 #include "amdgpu_gem.h"
 #include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
 
 #define MAX_GPU_INSTANCE               16
 
@@ -163,6 +164,7 @@ extern int amdgpu_si_support;
 extern int amdgpu_cik_support;
 #endif
 
+#define AMDGPU_VM_MAX_NUM_CTX                  4096
 #define AMDGPU_SG_THRESHOLD                    (256*1024*1024)
 #define AMDGPU_DEFAULT_GTT_SIZE_MB             3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
@@ -862,6 +864,9 @@ struct amdgpu_device {
        /* GDS */
        struct amdgpu_gds               gds;
 
+       /* KFD */
+       struct amdgpu_kfd_dev           kfd;
+
        /* display related functionality */
        struct amdgpu_display_manager dm;
 
@@ -875,9 +880,6 @@ struct amdgpu_device {
        atomic64_t visible_pin_size;
        atomic64_t gart_pin_size;
 
-       /* amdkfd interface */
-       struct kfd_dev          *kfd;
-
        /* soc15 register offset based on ip, instance and  segment */
        uint32_t                *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
@@ -910,7 +912,9 @@ struct amdgpu_device {
        bool                            in_gpu_reset;
        struct mutex  lock_reset;
        struct amdgpu_doorbell_index doorbell_index;
+
        int asic_reset_res;
+       struct work_struct              xgmi_reset_work;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
index 47db65926d71782a44ae53dd82199490b59820bc..4376b17ca594614f9a02b5f9dfd70601cd06c017 100644 (file)
@@ -886,6 +886,5 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
 void amdgpu_acpi_fini(struct amdgpu_device *adev)
 {
        unregister_acpi_notifier(&adev->acpi_nb);
-       if (adev->atif)
-               kfree(adev->atif);
+       kfree(adev->atif);
 }
index d693b80476536cd13f943ffa69e889f6cc4ef2fd..2dfaf158ef073340e778c9f51a9e6bc905f3f48d 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include <linux/module.h>
+#include <linux/dma-buf.h>
 
 const struct kgd2kfd_calls *kgd2kfd;
 
 static const unsigned int compute_vmid_bitmap = 0xFF00;
 
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
 int amdgpu_amdkfd_init(void)
 {
+       struct sysinfo si;
        int ret;
 
+       si_meminfo(&si);
+       amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+       amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
 #ifdef CONFIG_HSA_AMD
        ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
        if (ret)
@@ -87,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
                return;
        }
 
-       adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
-                                  adev->pdev, kfd2kgd);
+       adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+                                      adev->pdev, kfd2kgd);
+
+       if (adev->kfd.dev)
+               amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
 }
 
 /**
@@ -128,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
        int i, n;
        int last_valid_bit;
-       if (adev->kfd) {
+
+       if (adev->kfd.dev) {
                struct kgd2kfd_shared_resources gpu_resources = {
                        .compute_vmid_bitmap = compute_vmid_bitmap,
                        .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -167,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                                &gpu_resources.doorbell_start_offset);
 
                if (adev->asic_type < CHIP_VEGA10) {
-                       kgd2kfd->device_init(adev->kfd, &gpu_resources);
+                       kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
                        return;
                }
 
@@ -196,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
                gpu_resources.reserved_doorbell_mask = 0x1e0;
                gpu_resources.reserved_doorbell_val  = 0x0e0;
 
-               kgd2kfd->device_init(adev->kfd, &gpu_resources);
+               kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
        }
 }
 
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
 {
-       if (adev->kfd) {
-               kgd2kfd->device_exit(adev->kfd);
-               adev->kfd = NULL;
+       if (adev->kfd.dev) {
+               kgd2kfd->device_exit(adev->kfd.dev);
+               adev->kfd.dev = NULL;
        }
 }
 
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                const void *ih_ring_entry)
 {
-       if (adev->kfd)
-               kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+       if (adev->kfd.dev)
+               kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
 {
-       if (adev->kfd)
-               kgd2kfd->suspend(adev->kfd);
+       if (adev->kfd.dev)
+               kgd2kfd->suspend(adev->kfd.dev);
 }
 
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
 {
        int r = 0;
 
-       if (adev->kfd)
-               r = kgd2kfd->resume(adev->kfd);
+       if (adev->kfd.dev)
+               r = kgd2kfd->resume(adev->kfd.dev);
 
        return r;
 }
@@ -235,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
 {
        int r = 0;
 
-       if (adev->kfd)
-               r = kgd2kfd->pre_reset(adev->kfd);
+       if (adev->kfd.dev)
+               r = kgd2kfd->pre_reset(adev->kfd.dev);
 
        return r;
 }
@@ -245,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
 {
        int r = 0;
 
-       if (adev->kfd)
-               r = kgd2kfd->post_reset(adev->kfd);
+       if (adev->kfd.dev)
+               r = kgd2kfd->post_reset(adev->kfd.dev);
 
        return r;
 }
@@ -419,6 +434,62 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
        cu_info->lds_size = acu_info.lds_size;
 }
 
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+                                 struct kgd_dev **dma_buf_kgd,
+                                 uint64_t *bo_size, void *metadata_buffer,
+                                 size_t buffer_size, uint32_t *metadata_size,
+                                 uint32_t *flags)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+       struct dma_buf *dma_buf;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *bo;
+       uint64_t metadata_flags;
+       int r = -EINVAL;
+
+       dma_buf = dma_buf_get(dma_buf_fd);
+       if (IS_ERR(dma_buf))
+               return PTR_ERR(dma_buf);
+
+       if (dma_buf->ops != &amdgpu_dmabuf_ops)
+               /* Can't handle non-graphics buffers */
+               goto out_put;
+
+       obj = dma_buf->priv;
+       if (obj->dev->driver != adev->ddev->driver)
+               /* Can't handle buffers from different drivers */
+               goto out_put;
+
+       adev = obj->dev->dev_private;
+       bo = gem_to_amdgpu_bo(obj);
+       if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+                                   AMDGPU_GEM_DOMAIN_GTT)))
+               /* Only VRAM and GTT BOs are supported */
+               goto out_put;
+
+       r = 0;
+       if (dma_buf_kgd)
+               *dma_buf_kgd = (struct kgd_dev *)adev;
+       if (bo_size)
+               *bo_size = amdgpu_bo_size(bo);
+       if (metadata_size)
+               *metadata_size = bo->metadata_size;
+       if (metadata_buffer)
+               r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+                                          metadata_size, &metadata_flags);
+       if (flags) {
+               *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+                       ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+               if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+                       *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+       }
+
+out_put:
+       dma_buf_put(dma_buf);
+       return r;
+}
+
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -501,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 {
-       if (adev->kfd) {
+       if (adev->kfd.dev) {
                if ((1 << vmid) & compute_vmid_bitmap)
                        return true;
        }
@@ -515,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
        return false;
 }
 
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 {
 }
 
index bcf587b4ba9809f40f2efa5e8c6d6c97db207bd5..70429f7aa9a84c0189ce3e3748a55f4b3549b08b 100644 (file)
@@ -27,7 +27,6 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/mmu_context.h>
 #include <linux/workqueue.h>
 #include <kgd_kfd_interface.h>
 #include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
 #include "amdgpu_vm.h"
 
 extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
 
 struct amdgpu_device;
 
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
        char timeline_name[TASK_COMM_LEN];
 };
 
+struct amdgpu_kfd_dev {
+       struct kfd_dev *dev;
+       uint64_t vram_used;
+};
+
 struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
                                                       struct mm_struct *mm);
 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -144,6 +149,11 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
 
 uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
 void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+                                 struct kgd_dev **dmabuf_kgd,
+                                 uint64_t *bo_size, void *metadata_buffer,
+                                 size_t buffer_size, uint32_t *metadata_size,
+                                 uint32_t *flags);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
                                              struct kfd_vm_fault_info *info);
 
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+                                     struct dma_buf *dmabuf,
+                                     uint64_t va, void *vm,
+                                     struct kgd_mem **mem, uint64_t *size,
+                                     uint64_t *mmap_offset);
+
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
 
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
index 72a357dae07006d9ce46cd72df93d3b5cf201094..ff7fac7df34b5be7b7e46cabc1f743e3775a8761 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/mmu_context.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
index 0e2a56b6a9b68ba557c149ddefaa41cfc482c783..56ea929f524b5c31831abdb8d50233aa01929636 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/mmu_context.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
index 03b604c96d94e24618e5ef3b4baa741938c49204..5c51d4910650934752d22c5bd2b0f06c768708ee 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/fdtable.h>
 #include <linux/uaccess.h>
 #include <linux/firmware.h>
+#include <linux/mmu_context.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
index f3129b912714b0cac63cd2d60469e35d1778795e..be1ab43473c6c727bd1832efa3d5b7c09edffed2 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
 #include <drm/drmP.h>
 #include "amdgpu_object.h"
 #include "amdgpu_vm.h"
@@ -110,17 +111,17 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
                (kfd_mem_limit.max_ttm_mem_limit >> 20));
 }
 
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)
 {
-       size_t acc_size, system_mem_needed, ttm_mem_needed;
+       size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+       uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
        int ret = 0;
 
        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));
 
-       spin_lock(&kfd_mem_limit.mem_limit_lock);
-
+       vram_needed = 0;
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                /* TTM GTT memory */
                system_mem_needed = acc_size + size;
@@ -133,23 +134,30 @@ static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
                /* VRAM and SG */
                system_mem_needed = acc_size;
                ttm_mem_needed = acc_size;
+               if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+                       vram_needed = size;
        }
 
+       spin_lock(&kfd_mem_limit.mem_limit_lock);
+
        if ((kfd_mem_limit.system_mem_used + system_mem_needed >
-               kfd_mem_limit.max_system_mem_limit) ||
-               (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
-               kfd_mem_limit.max_ttm_mem_limit))
+            kfd_mem_limit.max_system_mem_limit) ||
+           (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+            kfd_mem_limit.max_ttm_mem_limit) ||
+           (adev->kfd.vram_used + vram_needed >
+            adev->gmc.real_vram_size - reserved_for_pt)) {
                ret = -ENOMEM;
-       else {
+       } else {
                kfd_mem_limit.system_mem_used += system_mem_needed;
                kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+               adev->kfd.vram_used += vram_needed;
        }
 
        spin_unlock(&kfd_mem_limit.mem_limit_lock);
        return ret;
 }
 
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
+static void unreserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)
 {
        size_t acc_size;
@@ -167,6 +175,11 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
        } else {
                kfd_mem_limit.system_mem_used -= acc_size;
                kfd_mem_limit.ttm_mem_used -= acc_size;
+               if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                       adev->kfd.vram_used -= size;
+                       WARN_ONCE(adev->kfd.vram_used < 0,
+                                 "kfd VRAM memory accounting unbalanced");
+               }
        }
        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");
@@ -176,29 +189,18 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
        spin_unlock(&kfd_mem_limit.mem_limit_lock);
 }
 
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 {
-       spin_lock(&kfd_mem_limit.mem_limit_lock);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       u32 domain = bo->preferred_domains;
+       bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
 
        if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
-               kfd_mem_limit.system_mem_used -=
-                       (bo->tbo.acc_size + amdgpu_bo_size(bo));
-               kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
-       } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
-               kfd_mem_limit.system_mem_used -=
-                       (bo->tbo.acc_size + amdgpu_bo_size(bo));
-               kfd_mem_limit.ttm_mem_used -=
-                       (bo->tbo.acc_size + amdgpu_bo_size(bo));
-       } else {
-               kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
-               kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
+               domain = AMDGPU_GEM_DOMAIN_CPU;
+               sg = false;
        }
-       WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
-                 "kfd system memory accounting unbalanced");
-       WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
-                 "kfd TTM memory accounting unbalanced");
 
-       spin_unlock(&kfd_mem_limit.mem_limit_lock);
+       unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
 }
 
 
@@ -535,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
        struct amdgpu_bo *bo = mem->bo;
 
        INIT_LIST_HEAD(&entry->head);
-       entry->shared = true;
+       entry->num_shared = 1;
        entry->bo = &bo->tbo;
        mutex_lock(&process_info->lock);
        if (userptr)
@@ -676,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
-       ctx->kfd_bo.tv.shared = true;
+       ctx->kfd_bo.tv.num_shared = 1;
        ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
@@ -740,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
-       ctx->kfd_bo.tv.shared = true;
+       ctx->kfd_bo.tv.num_shared = 1;
        ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
@@ -885,6 +887,24 @@ update_gpuvm_pte_failed:
        return ret;
 }
 
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+       struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+       if (!sg)
+               return NULL;
+       if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+               kfree(sg);
+               return NULL;
+       }
+       sg->sgl->dma_address = addr;
+       sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+       sg->sgl->dma_length = size;
+#endif
+       return sg;
+}
+
 static int process_validate_vms(struct amdkfd_process_info *process_info)
 {
        struct amdgpu_vm *peer_vm;
@@ -1168,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+       enum ttm_bo_type bo_type = ttm_bo_type_device;
+       struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
@@ -1196,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                if (!offset || !*offset)
                        return -EINVAL;
                user_addr = *offset;
+       } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+               domain = AMDGPU_GEM_DOMAIN_GTT;
+               alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+               bo_type = ttm_bo_type_sg;
+               alloc_flags = 0;
+               if (size > UINT_MAX)
+                       return -EINVAL;
+               sg = create_doorbell_sg(*offset, size);
+               if (!sg)
+                       return -ENOMEM;
        } else {
                return -EINVAL;
        }
 
        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-       if (!*mem)
-               return -ENOMEM;
+       if (!*mem) {
+               ret = -ENOMEM;
+               goto err;
+       }
        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        mutex_init(&(*mem)->lock);
        (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1235,8 +1269,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
        amdgpu_sync_create(&(*mem)->sync);
 
-       ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size,
-                                                    alloc_domain, false);
+       ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
        if (ret) {
                pr_debug("Insufficient system memory\n");
                goto err_reserve_limit;
@@ -1250,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        bp.byte_align = byte_align;
        bp.domain = alloc_domain;
        bp.flags = alloc_flags;
-       bp.type = ttm_bo_type_device;
+       bp.type = bo_type;
        bp.resv = NULL;
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret) {
@@ -1258,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                                domain_string(alloc_domain), ret);
                goto err_bo_create;
        }
+       if (bo_type == ttm_bo_type_sg) {
+               bo->tbo.sg = sg;
+               bo->tbo.ttm->sg = sg;
+       }
        bo->kfd_bo = *mem;
        (*mem)->bo = bo;
        if (user_addr)
@@ -1289,10 +1326,15 @@ allocate_init_user_pages_failed:
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
 err_bo_create:
-       unreserve_system_mem_limit(adev, size, alloc_domain, false);
+       unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
        mutex_destroy(&(*mem)->lock);
        kfree(*mem);
+err:
+       if (sg) {
+               sg_free_table(sg);
+               kfree(sg);
+       }
        return ret;
 }
 
@@ -1362,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);
 
+       /* If the SG is not NULL, it's one we created for a doorbell
+        * BO. We need to free it.
+        */
+       if (mem->bo->tbo.sg) {
+               sg_free_table(mem->bo->tbo.sg);
+               kfree(mem->bo->tbo.sg);
+       }
+
        /* Free the BO*/
        amdgpu_bo_unref(&mem->bo);
        mutex_destroy(&mem->lock);
@@ -1664,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
        return 0;
 }
 
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+                                     struct dma_buf *dma_buf,
+                                     uint64_t va, void *vm,
+                                     struct kgd_mem **mem, uint64_t *size,
+                                     uint64_t *mmap_offset)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *bo;
+       struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+       if (dma_buf->ops != &amdgpu_dmabuf_ops)
+               /* Can't handle non-graphics buffers */
+               return -EINVAL;
+
+       obj = dma_buf->priv;
+       if (obj->dev->dev_private != adev)
+               /* Can't handle buffers from other devices */
+               return -EINVAL;
+
+       bo = gem_to_amdgpu_bo(obj);
+       if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+                                   AMDGPU_GEM_DOMAIN_GTT)))
+               /* Only VRAM and GTT BOs are supported */
+               return -EINVAL;
+
+       *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+       if (!*mem)
+               return -ENOMEM;
+
+       if (size)
+               *size = amdgpu_bo_size(bo);
+
+       if (mmap_offset)
+               *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+       INIT_LIST_HEAD(&(*mem)->bo_va_list);
+       mutex_init(&(*mem)->lock);
+       (*mem)->mapping_flags =
+               AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+               AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+       (*mem)->bo = amdgpu_bo_ref(bo);
+       (*mem)->va = va;
+       (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+               AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+       (*mem)->mapped_to_gpu_memory = 0;
+       (*mem)->process_info = avm->process_info;
+       add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+       amdgpu_sync_create(&(*mem)->sync);
+
+       return 0;
+}
+
 /* Evict a userptr BO by stopping the queues if necessary
  *
  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1830,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                            validate_list.head) {
                list_add_tail(&mem->resv_list.head, &resv_list);
                mem->resv_list.bo = mem->validate_list.bo;
-               mem->resv_list.shared = mem->validate_list.shared;
+               mem->resv_list.num_shared = mem->validate_list.num_shared;
        }
 
        /* Reserve all BOs and page tables for validation */
@@ -2049,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 
                list_add_tail(&mem->resv_list.head, &ctx.list);
                mem->resv_list.bo = mem->validate_list.bo;
-               mem->resv_list.shared = mem->validate_list.shared;
+               mem->resv_list.num_shared = mem->validate_list.num_shared;
        }
 
        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
index 14d2982a47cce32b028e151f6a78b291efe08263..5c79da8e1150ac9ee30e6ce90579961173033c5d 100644 (file)
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
                entry->priority = min(info[i].bo_priority,
                                      AMDGPU_BO_LIST_MAX_PRIORITY);
                entry->tv.bo = &bo->tbo;
-               entry->tv.shared = !bo->prime_shared_count;
 
                if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
                        list->gds_obj = bo;
index dc54e9efd910f10ab4fc404bbb899d787958cc14..5dc3ee372e2f9959e945e56324621dc894222667 100644 (file)
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &bo->tbo;
-       p->uf_entry.tv.shared = true;
+       /* One for TTM and one for the CS job */
+       p->uf_entry.tv.num_shared = 2;
        p->uf_entry.user_pages = NULL;
 
        drm_gem_object_put_unlocked(gobj);
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        return r;
        }
 
+       /* One for TTM and one for the CS job */
+       amdgpu_bo_list_for_each_entry(e, p->bo_list)
+               e->tv.num_shared = 2;
+
        amdgpu_bo_list_get_list(p->bo_list, &p->validated);
        if (p->bo_list->first_userptr != p->bo_list->num_entries)
                p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        gws = p->bo_list->gws_obj;
        oa = p->bo_list->oa_obj;
 
-       amdgpu_bo_list_for_each_entry(e, p->bo_list)
-               e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+       amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+               /* Make sure we use the exclusive slot for shared BOs */
+               if (bo->prime_shared_count)
+                       e->tv.num_shared = 0;
+               e->bo_va = amdgpu_vm_bo_find(vm, bo);
+       }
 
        if (gds) {
                p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
-       r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
-       if (r)
-               return r;
-
        p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
 
        if (amdgpu_vm_debug) {
index 5b550706ee76f9ad4aec0f39b3b83a7db5b400c9..7e22be7ca68a386999759964e280dfa3a4682993 100644 (file)
@@ -74,7 +74,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &bo->tbo;
-       csa_tv.shared = true;
+       csa_tv.num_shared = 1;
 
        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);
index 95f4c4139fc60a078d651b8164d11b0befcf766b..d85184b5b35cf8851a1459f0879433da0a40c274 100644 (file)
@@ -248,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                return -ENOMEM;
 
        mutex_lock(&mgr->lock);
-       r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+       r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
        if (r < 0) {
                mutex_unlock(&mgr->lock);
                kfree(ctx);
index c75badfa5c4c6d752ec0e51e82186ef6f381cea5..b60afeade50a3f86db57c26f993f2aacedd1c71e 100644 (file)
@@ -515,7 +515,6 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
  */
 static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
 {
-       amdgpu_asic_init_doorbell_index(adev);
 
        /* No doorbell on SI hardware generation */
        if (adev->asic_type < CHIP_BONAIRE) {
@@ -529,6 +528,8 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
        if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
                return -EINVAL;
 
+       amdgpu_asic_init_doorbell_index(adev);
+
        /* doorbell bar mapping */
        adev->doorbell.base = pci_resource_start(adev->pdev, 2);
        adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -1864,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 {
        int i, r;
 
+       if (adev->gmc.xgmi.num_physical_nodes > 1)
+               amdgpu_xgmi_remove_device(adev);
+
        amdgpu_amdkfd_device_fini(adev);
 
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -2353,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
        return amdgpu_device_asic_has_dc_support(adev->asic_type);
 }
 
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+       struct amdgpu_device *adev =
+               container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+       adev->asic_reset_res =  amdgpu_asic_reset(adev);
+       if (adev->asic_reset_res)
+               DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+                        adev->asic_reset_res, adev->ddev->unique);
+}
+
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -2451,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
                          amdgpu_device_delay_enable_gfx_off);
 
+       INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
        adev->gfx.gfx_off_req_count = 1;
        adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
 
@@ -3239,6 +3258,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 
        if (amdgpu_gpu_recovery == -1) {
                switch (adev->asic_type) {
+               case CHIP_BONAIRE:
+               case CHIP_HAWAII:
                case CHIP_TOPAZ:
                case CHIP_TONGA:
                case CHIP_FIJI:
@@ -3328,10 +3349,31 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
         */
        if (need_full_reset) {
                list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-                       r = amdgpu_asic_reset(tmp_adev);
-                       if (r)
-                               DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+                       /* For XGMI run all resets in parallel to speed up the process */
+                       if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+                               if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+                                       r = -EALREADY;
+                       } else
+                               r = amdgpu_asic_reset(tmp_adev);
+
+                       if (r) {
+                               DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
                                         r, tmp_adev->ddev->unique);
+                               break;
+                       }
+               }
+
+               /* For XGMI wait for all PSP resets to complete before proceed */
+               if (!r) {
+                       list_for_each_entry(tmp_adev, device_list_handle,
+                                           gmc.xgmi.head) {
+                               if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+                                       flush_work(&tmp_adev->xgmi_reset_work);
+                                       r = tmp_adev->asic_reset_res;
+                                       if (r)
+                                               break;
+                               }
+                       }
                }
        }
 
@@ -3518,8 +3560,6 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                if (tmp_adev == adev)
                        continue;
 
-               dev_info(tmp_adev->dev, "GPU reset begin for drm dev %s!\n", adev->ddev->unique);
-
                amdgpu_device_lock_adev(tmp_adev);
                r = amdgpu_device_pre_asic_reset(tmp_adev,
                                                 NULL,
index 7b3d1ebda9df6140524be2d01cf6f1fb11563c85..f4f00217546eacc02c1a4f3dbb1be809c4cbe494 100644 (file)
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        INIT_LIST_HEAD(&duplicates);
 
        tv.bo = &bo->tbo;
-       tv.shared = true;
+       tv.num_shared = 1;
        list_add(&tv.head, &list);
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
-               tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+               if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+                       tv.num_shared = 1;
+               else
+                       tv.num_shared = 0;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
index d63daba9b17c554912e2a82fd4c1620b5b654455..f1ddfc50bcc763636d6080a527870a10001dd27b 100644 (file)
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
 /*
  * GEM objects.
  */
index 8c57924c075fa5e8a7dc69d6b6c06384f9f6e96d..81e6070d255b513301dca839320d1410f2baa2ed 100644 (file)
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
        unsigned num_physical_nodes;
        /* gpu list in the same hive */
        struct list_head head;
+       bool supported;
 };
 
 struct amdgpu_gmc {
index 9ce8c93ec19bf9422e8e1a8c9f60f1a58291bc87..f877bb78d10a31b11ebb9d8563d7b86055390966 100644 (file)
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
 struct amdgpu_ih_funcs {
        /* ring read/write ptr handling, called from interrupt context */
        u32 (*get_wptr)(struct amdgpu_device *adev);
-       bool (*prescreen_iv)(struct amdgpu_device *adev);
        void (*decode_iv)(struct amdgpu_device *adev,
                          struct amdgpu_iv_entry *entry);
        void (*set_rptr)(struct amdgpu_device *adev);
 };
 
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
 
index 6b6524f04ce09246b906a9da07a4f8d8b767a417..b7968f4268625e99f7dc62fdddf3b44a896edae5 100644 (file)
@@ -145,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
        u32 ring_index = ih->rptr >> 2;
        struct amdgpu_iv_entry entry;
 
-       /* Prescreening of high-frequency interrupts */
-       if (!amdgpu_ih_prescreen_iv(adev))
-               return;
-
-       /* Before dispatching irq to IP blocks, send it to amdkfd */
-       amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
        entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
        amdgpu_ih_decode_iv(adev, &entry);
 
@@ -371,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
        unsigned client_id = entry->client_id;
        unsigned src_id = entry->src_id;
        struct amdgpu_irq_src *src;
+       bool handled = false;
        int r;
 
        trace_amdgpu_iv(entry);
 
        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
-               return;
-       }
 
-       if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+       } else  if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
                DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
-               return;
-       }
 
-       if (adev->irq.virq[src_id]) {
+       } else if (adev->irq.virq[src_id]) {
                generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
-       } else {
-               if (!adev->irq.client[client_id].sources) {
-                       DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
-                                 client_id, src_id);
-                       return;
-               }
 
-               src = adev->irq.client[client_id].sources[src_id];
-               if (!src) {
-                       DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
-                       return;
-               }
+       } else if (!adev->irq.client[client_id].sources) {
+               DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+                         client_id, src_id);
 
+       } else if ((src = adev->irq.client[client_id].sources[src_id])) {
                r = src->funcs->process(adev, src, entry);
-               if (r)
+               if (r < 0)
                        DRM_ERROR("error processing interrupt (%d)\n", r);
+               else if (r)
+                       handled = true;
+
+       } else {
+               DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
        }
+
+       /* Send it to amdkfd as well if it isn't already handled */
+       if (!handled)
+               amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
 }
 
 /**
index cf768acb51dce3f43efc48779d29ecdeda971846..fd271f9746a29ee6799f623557ffaf0354e31b45 100644 (file)
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
                amdgpu_bo_subtract_pin_size(bo);
 
        if (bo->kfd_bo)
-               amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+               amdgpu_amdkfd_unreserve_memory_limit(bo);
 
        amdgpu_bo_kunmap(bo);
 
@@ -607,53 +607,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        return r;
 }
 
-/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
-                              struct amdgpu_ring *ring,
-                              struct amdgpu_bo *bo,
-                              struct reservation_object *resv,
-                              struct dma_fence **fence,
-                              bool direct)
-
-{
-       struct amdgpu_bo *shadow = bo->shadow;
-       uint64_t bo_addr, shadow_addr;
-       int r;
-
-       if (!shadow)
-               return -EINVAL;
-
-       bo_addr = amdgpu_bo_gpu_offset(bo);
-       shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
-       r = reservation_object_reserve_shared(bo->tbo.resv, 1);
-       if (r)
-               goto err;
-
-       r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
-                              amdgpu_bo_size(bo), resv, fence,
-                              direct, false);
-       if (!r)
-               amdgpu_bo_fence(bo, *fence, true);
-
-err:
-       return r;
-}
-
 /**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
index 7d3312d0da1174dcc62e75d782fcb55c5f1cbd39..9291c2f837e95b674dd535f35c4e7daa82cbcb7a 100644 (file)
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
-                              struct amdgpu_ring *ring,
-                              struct amdgpu_bo *bo,
-                              struct reservation_object *resv,
-                              struct dma_fence **fence, bool direct);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
                             struct dma_fence **fence);
index 3e44d889f7af2fe6d7a0eaaac054839fd3b3cffd..71913a18d142cb028fe4077b0d4dca0d36be7f02 100644 (file)
@@ -39,8 +39,6 @@
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
 /**
  * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
  * implementation
@@ -332,7 +330,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
        return ret;
 }
 
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
        .attach = amdgpu_gem_map_attach,
        .detach = amdgpu_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
index e05dc66b10909b1a3bd5562ce5285c789da2d496..6759d898b3aba103998e177702e10526d03cc62a 100644 (file)
@@ -155,10 +155,22 @@ psp_cmd_submit_buf(struct psp_context *psp,
        return ret;
 }
 
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+bool psp_support_vmr_ring(struct psp_context *psp)
+{
+       if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+               return true;
+       else
+               return false;
+}
+
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+                                struct psp_gfx_cmd_resp *cmd,
                                 uint64_t tmr_mc, uint32_t size)
 {
-       cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+       if (psp_support_vmr_ring(psp))
+               cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+       else
+               cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
        cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
        cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
        cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -192,7 +204,7 @@ static int psp_tmr_load(struct psp_context *psp)
        if (!cmd)
                return -ENOMEM;
 
-       psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+       psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
        DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
                        PSP_TMR_SIZE, psp->tmr_mc_addr);
 
@@ -536,8 +548,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
        int ret;
        struct psp_context *psp = &adev->psp;
 
-       if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+       if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+               psp_ring_destroy(psp, PSP_RING_TYPE__KM);
                goto skip_memalloc;
+       }
 
        psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
        if (!psp->cmd)
index 9ec5d1a666a6dbe082aa7ba46394fa9a5704f28e..10decf70c9aa33d1b32a31f7eabe9cbba30a06f1 100644 (file)
@@ -217,6 +217,7 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 
 int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+bool psp_support_vmr_ring(struct psp_context *psp);
 
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
 
index 5b75bdc8dc28f673957067f36268ea7fb6eae8e1..335a0edf114b6d313091df85ed150782238c0a64 100644 (file)
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
 {
        ktime_t deadline = ktime_add_us(ktime_get(), 10000);
 
-       if (!ring->funcs->soft_recovery)
+       if (!ring->funcs->soft_recovery || !fence)
                return false;
 
        atomic_inc(&ring->adev->gpu_reset_counter);
index 58a2363040ddca5cbfe2ff1891f1617fb47d073d..fc91f3e54a87da4b4f3bb440b0f0d942e493e3de 100644 (file)
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
        entry->priority = 0;
        entry->tv.bo = &vm->root.base.bo->tbo;
-       entry->tv.shared = true;
+       /* One for the VM updates, one for TTM and one for the CS job */
+       entry->tv.num_shared = 3;
        entry->user_pages = NULL;
        list_add(&entry->tv.head, validated);
 }
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 
        ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
-       r = reservation_object_reserve_shared(bo->tbo.resv, 1);
-       if (r)
-               return r;
-
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                goto error;
@@ -1842,10 +1839,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        if (r)
                goto error_free;
 
-       r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
-       if (r)
-               goto error_free;
-
        r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
        if (r)
                goto error_free;
@@ -3026,6 +3019,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        if (r)
                goto error_free_root;
 
+       r = reservation_object_reserve_shared(root->tbo.resv, 1);
+       if (r)
+               goto error_unreserve;
+
        r = amdgpu_vm_clear_bo(adev, vm, root,
                               adev->vm_manager.root_level,
                               vm->pte_support_ats);
@@ -3055,7 +3052,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        }
 
        INIT_KFIFO(vm->faults);
-       vm->fault_credit = 16;
 
        return 0;
 
@@ -3267,42 +3263,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                amdgpu_vmid_free_reserved(adev, vm, i);
 }
 
-/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
-                                 unsigned int pasid)
-{
-       struct amdgpu_vm *vm;
-
-       spin_lock(&adev->vm_manager.pasid_lock);
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-       if (!vm) {
-               /* VM not found, can't track fault credit */
-               spin_unlock(&adev->vm_manager.pasid_lock);
-               return true;
-       }
-
-       /* No lock needed. only accessed by IRQ handler */
-       if (!vm->fault_credit) {
-               /* Too many faults in this VM */
-               spin_unlock(&adev->vm_manager.pasid_lock);
-               return false;
-       }
-
-       vm->fault_credit--;
-       spin_unlock(&adev->vm_manager.pasid_lock);
-       return true;
-}
-
 /**
  * amdgpu_vm_manager_init - init the VM manager
  *
index 2a8898d19c8b58076dd88526a4304e3104a67866..e8dcfd59fc93353468aaada4c42e6ab4ca6d85a5 100644 (file)
@@ -229,9 +229,6 @@ struct amdgpu_vm {
        /* Up to 128 pending retry page faults */
        DECLARE_KFIFO(faults, u64, 128);
 
-       /* Limit non-retry fault storms */
-       unsigned int            fault_credit;
-
        /* Points to the KFD process VM info */
        struct amdkfd_process_info *process_info;
 
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
-                                 unsigned int pasid);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
index fb37e69f1bba3eb6e07efc8f40af4a947ae36734..0b263a9857c6496d95a42d385099d897408e301d 100644 (file)
@@ -78,7 +78,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
                        adev->gmc.xgmi.node_id,
                        adev->gmc.xgmi.hive_id, ret);
        else
-               dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
+               dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
                         adev->gmc.xgmi.physical_node_id,
                                 adev->gmc.xgmi.hive_id);
 
@@ -94,9 +94,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
        int count = 0, ret = -EINVAL;
 
-       if ((adev->asic_type < CHIP_VEGA20) ||
-               (adev->flags & AMD_IS_APU) )
+       if (!adev->gmc.xgmi.supported)
                return 0;
+
        adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
        adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
 
@@ -135,3 +135,23 @@ exit:
        mutex_unlock(&xgmi_mutex);
        return ret;
 }
+
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+       struct amdgpu_hive_info *hive;
+
+       if (!adev->gmc.xgmi.supported)
+               return;
+
+       mutex_lock(&xgmi_mutex);
+
+       hive = amdgpu_get_xgmi_hive(adev);
+       if (!hive)
+               goto exit;
+
+       if (!(hive->number_devices--))
+               mutex_destroy(&hive->hive_lock);
+
+exit:
+       mutex_unlock(&xgmi_mutex);
+}
index 6335bfdcc51dc6ec4b5fa02dda48b2068e0d3318..6151eb9c8ad30bbcaf54eacbcd67b649a7b4fa5a 100644 (file)
@@ -35,5 +35,6 @@ struct amdgpu_hive_info {
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
 
 #endif
index b5775c6a857ba63148d21df914509263383326d0..8a8b4967a101fca3b8daca0908ce39fc2247ae4b 100644 (file)
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
  * [127:96] - reserved
  */
 
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-       u32 ring_index = adev->irq.ih.rptr >> 2;
-       u16 pasid;
-
-       switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-       case 146:
-       case 147:
-               pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-               if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-                       return true;
-               break;
-       default:
-               /* Not a VM fault */
-               return true;
-       }
-
-       adev->irq.ih.rptr += 16;
-       return false;
-}
-
  /**
  * cik_ih_decode_iv - decode an interrupt vector
  *
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs cik_ih_funcs = {
        .get_wptr = cik_ih_get_wptr,
-       .prescreen_iv = cik_ih_prescreen_iv,
        .decode_iv = cik_ih_decode_iv,
        .set_rptr = cik_ih_set_rptr
 };
index df5ac4d85a00a5767011c398904a92ab11edce37..9d3ea298e116de7a0fa14e5858b50444ddd7dd46 100644 (file)
@@ -207,34 +207,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
        return (wptr & adev->irq.ih.ptr_mask);
 }
 
-/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-       u32 ring_index = adev->irq.ih.rptr >> 2;
-       u16 pasid;
-
-       switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-       case 146:
-       case 147:
-               pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-               if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-                       return true;
-               break;
-       default:
-               /* Not a VM fault */
-               return true;
-       }
-
-       adev->irq.ih.rptr += 16;
-       return false;
-}
-
 /**
  * cz_ih_decode_iv - decode an interrupt vector
  *
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs cz_ih_funcs = {
        .get_wptr = cz_ih_get_wptr,
-       .prescreen_iv = cz_ih_prescreen_iv,
        .decode_iv = cz_ih_decode_iv,
        .set_rptr = cz_ih_set_rptr
 };
index 1454fc30678332ac59748054148f2120e23abf2b..381f593b0cda83bdda6f67f2d2b41cd8488583c4 100644 (file)
@@ -4068,6 +4068,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
+       if (amdgpu_sriov_vf(adev)) {
+               gfx_v8_0_init_csb(adev);
+               return 0;
+       }
+
        adev->gfx.rlc.funcs->stop(adev);
        adev->gfx.rlc.funcs->reset(adev);
        gfx_v8_0_init_pg(adev);
@@ -4947,14 +4952,13 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
 static int gfx_v8_0_pre_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+       u32 grbm_soft_reset = 0;
 
        if ((!adev->gfx.grbm_soft_reset) &&
            (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
-       srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
        /* stop the rlc */
        adev->gfx.rlc.funcs->stop(adev);
@@ -5051,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle)
 static int gfx_v8_0_post_soft_reset(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+       u32 grbm_soft_reset = 0;
 
        if ((!adev->gfx.grbm_soft_reset) &&
            (!adev->gfx.srbm_soft_reset))
                return 0;
 
        grbm_soft_reset = adev->gfx.grbm_soft_reset;
-       srbm_soft_reset = adev->gfx.srbm_soft_reset;
 
        if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
            REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
index af8ccb014be33a76d53cb69099764fd451c02623..f62d570a81a801e37cd1dcf0d4edb27016f56461 100644 (file)
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
 MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
 
 MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -645,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+       /*
+        * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
+        * instead of picasso_rlc.bin.
+        * Judgment method:
+        * PCO AM4: revision >= 0xC8 && revision <= 0xCF
+        *          or revision >= 0xD8 && revision <= 0xDF
+        * otherwise is PCO FP5
+        */
+       if (!strcmp(chip_name, "picasso") &&
+               (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+               ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+       else
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
index 3a4e5d8d5162dd0e4d404ad6cb9bcba36ae68733..ce150de723c98132511304944720629a8a9eb460 100644 (file)
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * vega10_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+                                 struct amdgpu_iv_entry *entry,
+                                 uint64_t addr)
+{
+       struct amdgpu_vm *vm;
+       u64 key;
+       int r;
+
+       /* No PASID, can't identify faulting process */
+       if (!entry->pasid)
+               return true;
+
+       /* Not a retry fault */
+       if (!(entry->src_data[1] & 0x80))
+               return true;
+
+       /* Track retry faults in per-VM fault FIFO. */
+       spin_lock(&adev->vm_manager.pasid_lock);
+       vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+       if (!vm) {
+               /* VM not found, process it normally */
+               spin_unlock(&adev->vm_manager.pasid_lock);
+               return true;
+       }
+
+       key = AMDGPU_VM_FAULT(entry->pasid, addr);
+       r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+       /* Hash table is full or the fault is already being processed,
+        * ignore further page faults
+        */
+       if (r != 0) {
+               spin_unlock(&adev->vm_manager.pasid_lock);
+               return false;
+       }
+       /* No locking required with single writer and single reader */
+       r = kfifo_put(&vm->faults, key);
+       if (!r) {
+               /* FIFO is full. Ignore it until there is space */
+               amdgpu_vm_clear_fault(vm->fault_hash, key);
+               spin_unlock(&adev->vm_manager.pasid_lock);
+               return false;
+       }
+
+       spin_unlock(&adev->vm_manager.pasid_lock);
+       /* It's the first fault for this address, process it normally */
+       return true;
+}
+
 static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry)
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
        addr = (u64)entry->src_data[0] << 12;
        addr |= ((u64)entry->src_data[1] & 0xf) << 44;
 
+       if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+               return 1; /* This also prevents sending it to KFD */
+
        if (!amdgpu_sriov_vf(adev)) {
                status = RREG32(hub->vm_l2_pro_fault_status);
                WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -902,6 +961,9 @@ static int gmc_v9_0_sw_init(void *handle)
        /* This interrupt is VMC page fault.*/
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
                                &adev->gmc.vm_fault);
+       if (r)
+               return r;
+
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
                                &adev->gmc.vm_fault);
 
@@ -934,7 +996,7 @@ static int gmc_v9_0_sw_init(void *handle)
        }
        adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
 
-       if (adev->asic_type == CHIP_VEGA20) {
+       if (adev->gmc.xgmi.supported) {
                r = gfxhub_v1_1_get_xgmi_info(adev);
                if (r)
                        return r;
index cf0fc61aebe6d5f2de6f9217126dbb1fa94fe05b..a3984d10b604e1a4127009bf63f57cf3a964da7e 100644 (file)
@@ -207,34 +207,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
        return (wptr & adev->irq.ih.ptr_mask);
 }
 
-/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-       u32 ring_index = adev->irq.ih.rptr >> 2;
-       u16 pasid;
-
-       switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-       case 146:
-       case 147:
-               pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-               if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-                       return true;
-               break;
-       default:
-               /* Not a VM fault */
-               return true;
-       }
-
-       adev->irq.ih.rptr += 16;
-       return false;
-}
-
 /**
  * iceland_ih_decode_iv - decode an interrupt vector
  *
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs iceland_ih_funcs = {
        .get_wptr = iceland_ih_get_wptr,
-       .prescreen_iv = iceland_ih_prescreen_iv,
        .decode_iv = iceland_ih_decode_iv,
        .set_rptr = iceland_ih_set_rptr
 };
index 882bd83a28c4d2ff2a7b23221caab9a254bbbb77..0de00fbe9233843cedf3549e4f5cc8bb875094d5 100644 (file)
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_ENABLE_INT      = 0x00050000,   /* enable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
+    GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x000A0000,   /* send interrupt to psp for updating write pointer of vf */
+    GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
 
     GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
 };
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
     GFX_CMD_ID_LOAD_IP_FW   = 0x00000006,   /* load HW IP FW */
     GFX_CMD_ID_DESTROY_TMR  = 0x00000007,   /* destroy TMR region */
     GFX_CMD_ID_SAVE_RESTORE = 0x00000008,   /* save/restore HW IP FW */
-
+    GFX_CMD_ID_SETUP_VMR    = 0x00000009,   /* setup VMR region */
+    GFX_CMD_ID_DESTROY_VMR  = 0x0000000A,   /* destroy VMR region */
 };
 
 
index e5dd052d9e06a5af4ea8d76b99944f42617b3f63..6c9a1b748ca70dfb5eca172ecfc84ef429b16255 100644 (file)
@@ -171,8 +171,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
         * are already been loaded.
         */
        sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-       if (sol_reg)
+       if (sol_reg) {
+               psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+               printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
                return 0;
+       }
 
        /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
        ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -296,26 +299,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
        struct psp_ring *ring = &psp->km_ring;
        struct amdgpu_device *adev = psp->adev;
 
-       /* Write low address of the ring to C2PMSG_69 */
-       psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
-       WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
-       /* Write high address of the ring to C2PMSG_70 */
-       psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
-       WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
-       /* Write size of ring to C2PMSG_71 */
-       psp_ring_reg = ring->ring_size;
-       WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
-       /* Write the ring initialization command to C2PMSG_64 */
-       psp_ring_reg = ring_type;
-       psp_ring_reg = psp_ring_reg << 16;
-       WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
-       /* there might be handshake issue with hardware which needs delay */
-       mdelay(20);
-
-       /* Wait for response flag (bit 31) in C2PMSG_64 */
-       ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-                          0x80000000, 0x8000FFFF, false);
+       if (psp_support_vmr_ring(psp)) {
+               /* Write low address of the ring to C2PMSG_102 */
+               psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+               /* Write high address of the ring to C2PMSG_103 */
+               psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+               /* Write the ring initialization command to C2PMSG_101 */
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+                                            GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+               /* there might be handshake issue with hardware which needs delay */
+               mdelay(20);
+
+               /* Wait for response flag (bit 31) in C2PMSG_101 */
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+                                  0x80000000, 0x8000FFFF, false);
+
+       } else {
+               /* Write low address of the ring to C2PMSG_69 */
+               psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+               /* Write high address of the ring to C2PMSG_70 */
+               psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+               /* Write size of ring to C2PMSG_71 */
+               psp_ring_reg = ring->ring_size;
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+               /* Write the ring initialization command to C2PMSG_64 */
+               psp_ring_reg = ring_type;
+               psp_ring_reg = psp_ring_reg << 16;
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+               /* there might be handshake issue with hardware which needs delay */
+               mdelay(20);
+
+               /* Wait for response flag (bit 31) in C2PMSG_64 */
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+                                  0x80000000, 0x8000FFFF, false);
+       }
 
        return ret;
 }
@@ -326,15 +350,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
        int ret = 0;
        struct amdgpu_device *adev = psp->adev;
 
-       /* Write the ring destroy command to C2PMSG_64 */
-       WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+       /* Write the ring destroy command*/
+       if (psp_support_vmr_ring(psp))
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+                                    GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+       else
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+                                    GFX_CTRL_CMD_ID_DESTROY_RINGS);
 
        /* there might be handshake issue with hardware which needs delay */
        mdelay(20);
 
-       /* Wait for response flag (bit 31) in C2PMSG_64 */
-       ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
-                          0x80000000, 0x80000000, false);
+       /* Wait for response flag (bit 31) */
+       if (psp_support_vmr_ring(psp))
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+                                  0x80000000, 0x80000000, false);
+       else
+               ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+                                  0x80000000, 0x80000000, false);
 
        return ret;
 }
@@ -373,7 +406,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
        uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
 
        /* KM (GPCOM) prepare write pointer */
-       psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+       if (psp_support_vmr_ring(psp))
+               psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+       else
+               psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
        /* Update KM RB frame pointer to new frame */
        /* write_frame ptr increments by size of rb_frame in bytes */
@@ -402,7 +438,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
 
        /* Update the write Pointer in DWORDs */
        psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
-       WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+       if (psp_support_vmr_ring(psp)) {
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+       } else
+               WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
 
        return 0;
 }
@@ -547,7 +587,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
        /*send the mode 1 reset command*/
        WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
 
-       mdelay(1000);
+       msleep(500);
 
        offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
 
index 7efb823dd3b113c621b16fe79b934a055e00a75c..7357fd56e61445f18bd16b7a2553e9eaf4d1d960 100644 (file)
@@ -592,7 +592,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
        /*send the mode 1 reset command*/
        WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
 
-       mdelay(1000);
+       msleep(500);
 
        offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
 
index b3d7d9f83202d8a31379ef42288e2a2dc7f5f6af..2938fb9f17cc75c071ee0468434366fa68b39a4c 100644 (file)
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
        return (wptr & adev->irq.ih.ptr_mask);
 }
 
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-       /* Process all interrupts */
-       return true;
-}
-
 static void si_ih_decode_iv(struct amdgpu_device *adev,
                             struct amdgpu_iv_entry *entry)
 {
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs si_ih_funcs = {
        .get_wptr = si_ih_get_wptr,
-       .prescreen_iv = si_ih_prescreen_iv,
        .decode_iv = si_ih_decode_iv,
        .set_rptr = si_ih_set_rptr
 };
index 83624e150ca7141db5a0708df7b47f374333b16c..8849b74078d6e5373d82fba3f82980cf1bc4c8a7 100644 (file)
@@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
+       if (adev->asic_type == CHIP_VEGA20)
+               adev->gmc.xgmi.supported = true;
+
        if (adev->flags & AMD_IS_APU)
                adev->nbio_funcs = &nbio_v7_0_funcs;
        else if (adev->asic_type == CHIP_VEGA20)
index dcdbb4d72472e8dc8732364928a6dac02a162236..15da06ddeb751d9f20e1a461d5a0e4276186a3dc 100644 (file)
@@ -218,34 +218,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
        return (wptr & adev->irq.ih.ptr_mask);
 }
 
-/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-       u32 ring_index = adev->irq.ih.rptr >> 2;
-       u16 pasid;
-
-       switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
-       case 146:
-       case 147:
-               pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
-               if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
-                       return true;
-               break;
-       default:
-               /* Not a VM fault */
-               return true;
-       }
-
-       adev->irq.ih.rptr += 16;
-       return false;
-}
-
 /**
  * tonga_ih_decode_iv - decode an interrupt vector
  *
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs tonga_ih_funcs = {
        .get_wptr = tonga_ih_get_wptr,
-       .prescreen_iv = tonga_ih_prescreen_iv,
        .decode_iv = tonga_ih_decode_iv,
        .set_rptr = tonga_ih_set_rptr
 };
index d84b687240d11d41a28a2841fa5ee8c2a51a1855..2c250b01a903eb62e6724384c4816a701f0eabe4 100644 (file)
@@ -219,90 +219,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
        return (wptr & adev->irq.ih.ptr_mask);
 }
 
-/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
-       u32 ring_index = adev->irq.ih.rptr >> 2;
-       u32 dw0, dw3, dw4, dw5;
-       u16 pasid;
-       u64 addr, key;
-       struct amdgpu_vm *vm;
-       int r;
-
-       dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
-       dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
-       dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
-       dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
-       /* Filter retry page faults, let only the first one pass. If
-        * there are too many outstanding faults, ignore them until
-        * some faults get cleared.
-        */
-       switch (dw0 & 0xff) {
-       case SOC15_IH_CLIENTID_VMC:
-       case SOC15_IH_CLIENTID_UTCL2:
-               break;
-       default:
-               /* Not a VM fault */
-               return true;
-       }
-
-       pasid = dw3 & 0xffff;
-       /* No PASID, can't identify faulting process */
-       if (!pasid)
-               return true;
-
-       /* Not a retry fault, check fault credit */
-       if (!(dw5 & 0x80)) {
-               if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
-                       goto ignore_iv;
-               return true;
-       }
-
-       /* Track retry faults in per-VM fault FIFO. */
-       spin_lock(&adev->vm_manager.pasid_lock);
-       vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-       addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
-       key = AMDGPU_VM_FAULT(pasid, addr);
-       if (!vm) {
-               /* VM not found, process it normally */
-               spin_unlock(&adev->vm_manager.pasid_lock);
-               return true;
-       } else {
-               r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
-               /* Hash table is full or the fault is already being processed,
-                * ignore further page faults
-                */
-               if (r != 0) {
-                       spin_unlock(&adev->vm_manager.pasid_lock);
-                       goto ignore_iv;
-               }
-       }
-       /* No locking required with single writer and single reader */
-       r = kfifo_put(&vm->faults, key);
-       if (!r) {
-               /* FIFO is full. Ignore it until there is space */
-               amdgpu_vm_clear_fault(vm->fault_hash, key);
-               spin_unlock(&adev->vm_manager.pasid_lock);
-               goto ignore_iv;
-       }
-
-       spin_unlock(&adev->vm_manager.pasid_lock);
-       /* It's the first fault for this address, process it normally */
-       return true;
-
-ignore_iv:
-       adev->irq.ih.rptr += 32;
-       return false;
-}
-
 /**
  * vega10_ih_decode_iv - decode an interrupt vector
  *
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs vega10_ih_funcs = {
        .get_wptr = vega10_ih_get_wptr,
-       .prescreen_iv = vega10_ih_prescreen_iv,
        .decode_iv = vega10_ih_decode_iv,
        .set_rptr = vega10_ih_set_rptr
 };
index 5f4062b41adddab4cc30797766d18756ff1def79..3623538baf6fc9c20dd79167826ded7d52acbc9e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/dma-buf.h>
 #include <asm/processor.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
@@ -1273,6 +1274,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                return -EINVAL;
        }
 
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+               if (args->size != kfd_doorbell_process_slice(dev))
+                       return -EINVAL;
+               offset = kfd_get_process_doorbells(dev, p);
+       }
+
        mutex_lock(&p->mutex);
 
        pdd = kfd_bind_process_to_device(dev, p);
@@ -1550,6 +1557,115 @@ copy_from_user_failed:
        return err;
 }
 
+static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+               struct kfd_process *p, void *data)
+{
+       struct kfd_ioctl_get_dmabuf_info_args *args = data;
+       struct kfd_dev *dev = NULL;
+       struct kgd_dev *dma_buf_kgd;
+       void *metadata_buffer = NULL;
+       uint32_t flags;
+       unsigned int i;
+       int r;
+
+       /* Find a KFD GPU device that supports the get_dmabuf_info query */
+       for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+               if (dev)
+                       break;
+       if (!dev)
+               return -EINVAL;
+
+       if (args->metadata_ptr) {
+               metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
+               if (!metadata_buffer)
+                       return -ENOMEM;
+       }
+
+       /* Get dmabuf info from KGD */
+       r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
+                                         &dma_buf_kgd, &args->size,
+                                         metadata_buffer, args->metadata_size,
+                                         &args->metadata_size, &flags);
+       if (r)
+               goto exit;
+
+       /* Reverse-lookup gpu_id from kgd pointer */
+       dev = kfd_device_by_kgd(dma_buf_kgd);
+       if (!dev) {
+               r = -EINVAL;
+               goto exit;
+       }
+       args->gpu_id = dev->id;
+       args->flags = flags;
+
+       /* Copy metadata buffer to user mode */
+       if (metadata_buffer) {
+               r = copy_to_user((void __user *)args->metadata_ptr,
+                                metadata_buffer, args->metadata_size);
+               if (r != 0)
+                       r = -EFAULT;
+       }
+
+exit:
+       kfree(metadata_buffer);
+
+       return r;
+}
+
+static int kfd_ioctl_import_dmabuf(struct file *filep,
+                                  struct kfd_process *p, void *data)
+{
+       struct kfd_ioctl_import_dmabuf_args *args = data;
+       struct kfd_process_device *pdd;
+       struct dma_buf *dmabuf;
+       struct kfd_dev *dev;
+       int idr_handle;
+       uint64_t size;
+       void *mem;
+       int r;
+
+       dev = kfd_device_by_id(args->gpu_id);
+       if (!dev)
+               return -EINVAL;
+
+       dmabuf = dma_buf_get(args->dmabuf_fd);
+       if (!dmabuf)
+               return -EINVAL;
+
+       mutex_lock(&p->mutex);
+
+       pdd = kfd_bind_process_to_device(dev, p);
+       if (IS_ERR(pdd)) {
+               r = PTR_ERR(pdd);
+               goto err_unlock;
+       }
+
+       r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+                                             args->va_addr, pdd->vm,
+                                             (struct kgd_mem **)&mem, &size,
+                                             NULL);
+       if (r)
+               goto err_unlock;
+
+       idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+       if (idr_handle < 0) {
+               r = -EFAULT;
+               goto err_free;
+       }
+
+       mutex_unlock(&p->mutex);
+
+       args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+
+       return 0;
+
+err_free:
+       amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+       mutex_unlock(&p->mutex);
+       return r;
+}
+
 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
        [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
                            .cmd_drv = 0, .name = #ioctl}
@@ -1635,7 +1751,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
                        kfd_ioctl_set_cu_mask, 0),
 
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
-                       kfd_ioctl_get_queue_wave_state, 0)
+                       kfd_ioctl_get_queue_wave_state, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+                               kfd_ioctl_get_dmabuf_info, 0),
+
+       AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+                               kfd_ioctl_import_dmabuf, 0),
 
 };
 
index dec8e64f36bdc80ef2ac851ba35d700bf8d32140..0689d4ccbbc0f6fd59164f5ebab76246fca396e6 100644 (file)
@@ -793,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
 struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
 int kfd_numa_node_to_apic_id(int numa_node_id);
 
index aa793fcbbdcc23dd5d473f0f4ee96444ea429e36..5f5b2acedbac3bf0e15d8727d5327af37e3f7a79 100644 (file)
@@ -101,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
        down_read(&topology_lock);
 
        list_for_each_entry(top_dev, &topology_device_list, list)
-               if (top_dev->gpu->pdev == pdev) {
+               if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+                       device = top_dev->gpu;
+                       break;
+               }
+
+       up_read(&topology_lock);
+
+       return device;
+}
+
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+{
+       struct kfd_topology_device *top_dev;
+       struct kfd_dev *device = NULL;
+
+       down_read(&topology_lock);
+
+       list_for_each_entry(top_dev, &topology_device_list, list)
+               if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
                        device = top_dev->gpu;
                        break;
                }
index 32e791d9b9a8ba5290381b2685b4336b1e203031..c13856a46d8eb18c59f16f2c334d2a279900b0c8 100644 (file)
@@ -23,6 +23,9 @@
  *
  */
 
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
 #include "dm_services_types.h"
 #include "dc.h"
 #include "dc/inc/core_types.h"
@@ -54,6 +57,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_mst_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -72,6 +76,7 @@
 
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
+#include "modules/inc/mod_info_packet.h"
 
 #define FIRMWARE_RAVEN_DMCU            "amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -129,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                  struct drm_atomic_state *state);
 
+static void handle_cursor_update(struct drm_plane *plane,
+                                struct drm_plane_state *old_plane_state);
 
 
 
@@ -398,6 +405,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
 
+       mutex_init(&adev->dm.dc_lock);
+
        if(amdgpu_dm_irq_init(adev)) {
                DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
                goto error;
@@ -512,6 +521,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
        /* DC Destroy TODO: Replace destroy DAL */
        if (adev->dm.dc)
                dc_destroy(&adev->dm.dc);
+
+       mutex_destroy(&adev->dm.dc_lock);
+
        return;
 }
 
@@ -2930,6 +2942,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (dm_state && dm_state->freesync_capable)
                stream->ignore_msa_timing_param = true;
+
 finish:
        if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
                dc_sink_release(sink);
@@ -3001,6 +3014,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
        state->abm_level = cur->abm_level;
        state->vrr_supported = cur->vrr_supported;
        state->freesync_config = cur->freesync_config;
+       state->crc_enabled = cur->crc_enabled;
 
        /* TODO Duplicate dc_stream after objects are stream object is flattened */
 
@@ -3094,10 +3108,8 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
                        rmx_type = RMX_FULL;
                        break;
                case DRM_MODE_SCALE_NONE:
-                       rmx_type = RMX_OFF;
-                       break;
                default:
-                       rmx_type = RMX_ASPECT;
+                       rmx_type = RMX_OFF;
                        break;
                }
 
@@ -3210,10 +3222,11 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
        state = kzalloc(sizeof(*state), GFP_KERNEL);
 
        if (state) {
-               state->scaling = RMX_ASPECT;
+               state->scaling = RMX_OFF;
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
+               state->max_bpc = 8;
 
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
@@ -3235,6 +3248,11 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 
        new_state->freesync_capable = state->freesync_capable;
        new_state->abm_level = state->abm_level;
+       new_state->scaling = state->scaling;
+       new_state->underscan_enable = state->underscan_enable;
+       new_state->underscan_hborder = state->underscan_hborder;
+       new_state->underscan_vborder = state->underscan_vborder;
+       new_state->max_bpc = state->max_bpc;
 
        return &new_state->base;
 }
@@ -3607,10 +3625,43 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
        return -EINVAL;
 }
 
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+                                      struct drm_plane_state *new_plane_state)
+{
+       /* Only support async updates on cursor planes. */
+       if (plane->type != DRM_PLANE_TYPE_CURSOR)
+               return -EINVAL;
+
+       return 0;
+}
+
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+                                        struct drm_plane_state *new_state)
+{
+       struct drm_plane_state *old_state =
+               drm_atomic_get_old_plane_state(new_state->state, plane);
+
+       if (plane->state->fb != new_state->fb)
+               drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+
+       plane->state->src_x = new_state->src_x;
+       plane->state->src_y = new_state->src_y;
+       plane->state->src_w = new_state->src_w;
+       plane->state->src_h = new_state->src_h;
+       plane->state->crtc_x = new_state->crtc_x;
+       plane->state->crtc_y = new_state->crtc_y;
+       plane->state->crtc_w = new_state->crtc_w;
+       plane->state->crtc_h = new_state->crtc_h;
+
+       handle_cursor_update(plane, old_state);
+}
+
 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
+       .atomic_async_check = dm_plane_atomic_async_check,
+       .atomic_async_update = dm_plane_atomic_async_update
 };
 
 /*
@@ -4299,6 +4350,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
 static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state)
 {
+       struct amdgpu_device *adev = plane->dev->dev_private;
        struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
        struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
        struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -4323,9 +4375,12 @@ static void handle_cursor_update(struct drm_plane *plane,
 
        if (!position.enable) {
                /* turn off cursor */
-               if (crtc_state && crtc_state->stream)
+               if (crtc_state && crtc_state->stream) {
+                       mutex_lock(&adev->dm.dc_lock);
                        dc_stream_set_cursor_position(crtc_state->stream,
                                                      &position);
+                       mutex_unlock(&adev->dm.dc_lock);
+               }
                return;
        }
 
@@ -4343,6 +4398,7 @@ static void handle_cursor_update(struct drm_plane *plane,
        attributes.pitch = attributes.width;
 
        if (crtc_state->stream) {
+               mutex_lock(&adev->dm.dc_lock);
                if (!dc_stream_set_cursor_attributes(crtc_state->stream,
                                                         &attributes))
                        DRM_ERROR("DC failed to set cursor attributes\n");
@@ -4350,6 +4406,7 @@ static void handle_cursor_update(struct drm_plane *plane,
                if (!dc_stream_set_cursor_position(crtc_state->stream,
                                                   &position))
                        DRM_ERROR("DC failed to set cursor position\n");
+               mutex_unlock(&adev->dm.dc_lock);
        }
 }
 
@@ -4423,8 +4480,8 @@ static void update_freesync_state_on_stream(
                dm->freesync_module,
                new_stream,
                &vrr,
-               packet_type_vrr,
-               transfer_func_unknown,
+               PACKET_TYPE_VRR,
+               TRANSFER_FUNC_UNKNOWN,
                &vrr_infopacket);
 
        new_crtc_state->freesync_timing_changed =
@@ -4565,6 +4622,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
                                &acrtc_state->stream->vrr_infopacket;
        }
 
+       mutex_lock(&adev->dm.dc_lock);
        dc_commit_updates_for_stream(adev->dm.dc,
                                             surface_updates,
                                             1,
@@ -4572,6 +4630,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
                                             &stream_update,
                                             &surface_updates->surface,
                                             state);
+       mutex_unlock(&adev->dm.dc_lock);
 
        DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
                         __func__,
@@ -4586,6 +4645,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
  * with a dc_plane_state and follow the atomic model a bit more closely here.
  */
 static bool commit_planes_to_stream(
+               struct amdgpu_display_manager *dm,
                struct dc *dc,
                struct dc_plane_state **plane_states,
                uint8_t new_plane_count,
@@ -4662,11 +4722,13 @@ static bool commit_planes_to_stream(
                updates[i].scaling_info = &scaling_info[i];
        }
 
+       mutex_lock(&dm->dc_lock);
        dc_commit_updates_for_stream(
                        dc,
                        updates,
                        new_plane_count,
                        dc_stream, stream_update, plane_states, state);
+       mutex_unlock(&dm->dc_lock);
 
        kfree(flip_addr);
        kfree(plane_info);
@@ -4772,7 +4834,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
                dc_stream_attach->abm_level = acrtc_state->abm_level;
 
-               if (false == commit_planes_to_stream(dm->dc,
+               if (false == commit_planes_to_stream(dm,
+                                                       dm->dc,
                                                        plane_states_constructed,
                                                        planes_count,
                                                        acrtc_state,
@@ -4942,7 +5005,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
        if (dc_state) {
                dm_enable_per_frame_crtc_master_sync(dc_state);
+               mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
+               mutex_unlock(&dm->dc_lock);
        }
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -5004,6 +5069,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
                /*TODO How it works with MPO ?*/
                if (!commit_planes_to_stream(
+                               dm,
                                dm->dc,
                                status->plane_states,
                                status->plane_count,
@@ -5896,6 +5962,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        ret = -EINVAL;
                        goto fail;
                }
+       } else if (state->legacy_cursor_update) {
+               /*
+                * This is a fast cursor update coming from the plane update
+                * helper, check if it can be done asynchronously for better
+                * performance.
+                */
+               state->async_update = !drm_atomic_helper_async_check(dev, state);
        }
 
        /* Must be success */
index 4326dc256491bf26d8a9fdd727ca61d5c35772f3..25bb91ee80ba4b4b9e402ba58d75eb82a9d13458 100644 (file)
@@ -134,6 +134,14 @@ struct amdgpu_display_manager {
 
        struct drm_modeset_lock atomic_obj_lock;
 
+       /**
+        * @dc_lock:
+        *
+        * Guards access to DC functions that can issue register write
+        * sequences.
+        */
+       struct mutex dc_lock;
+
        /**
         * @irq_handler_list_low_tab:
         *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644 (file)
index 0000000..d898981
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu_dm
+
+#if !defined(_AMDGPU_DM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_DM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdgpu_dc_rreg,
+       TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
+       TP_ARGS(read_count, reg, value),
+       TP_STRUCT__entry(
+                       __field(uint32_t, reg)
+                       __field(uint32_t, value)
+               ),
+       TP_fast_assign(
+                       __entry->reg = reg;
+                       __entry->value = value;
+                       *read_count = *read_count + 1;
+               ),
+       TP_printk("reg=0x%08lx, value=0x%08lx",
+                       (unsigned long)__entry->reg,
+                       (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_dc_wreg,
+       TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
+       TP_ARGS(write_count, reg, value),
+       TP_STRUCT__entry(
+                       __field(uint32_t, reg)
+                       __field(uint32_t, value)
+               ),
+       TP_fast_assign(
+                       __entry->reg = reg;
+                       __entry->value = value;
+                       *write_count = *write_count + 1;
+               ),
+       TP_printk("reg=0x%08lx, value=0x%08lx",
+                       (unsigned long)__entry->reg,
+                       (unsigned long)__entry->value)
+);
+
+
+TRACE_EVENT(amdgpu_dc_performance,
+       TP_PROTO(unsigned long read_count, unsigned long write_count,
+               unsigned long *last_read, unsigned long *last_write,
+               const char *func, unsigned int line),
+       TP_ARGS(read_count, write_count, last_read, last_write, func, line),
+       TP_STRUCT__entry(
+                       __field(uint32_t, reads)
+                       __field(uint32_t, writes)
+                       __field(uint32_t, read_delta)
+                       __field(uint32_t, write_delta)
+                       __string(func, func)
+                       __field(uint32_t, line)
+                       ),
+       TP_fast_assign(
+                       __entry->reads = read_count;
+                       __entry->writes = write_count;
+                       __entry->read_delta = read_count - *last_read;
+                       __entry->write_delta = write_count - *last_write;
+                       __assign_str(func, func);
+                       __entry->line = line;
+                       *last_read = read_count;
+                       *last_write = write_count;
+                       ),
+       TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
+                       __get_str(func), __entry->line,
+                       (unsigned long)__entry->read_delta,
+                       (unsigned long)__entry->reads,
+                       (unsigned long)__entry->write_delta,
+                       (unsigned long)__entry->writes)
+);
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
index dba6b57830c78c832f48905514d859601d46803f..d9c57984394bdceb4640452d5d0bde8a39feeb82 100644 (file)
 
 const static char DC_BUILD_ID[] = "production-build";
 
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct.  One per driver.  Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints).  Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display.  Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
+ * (the display directly attached).  It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case)
+ *
+ * struct resource_pool - One per driver.  Represents the hw blocks not in the
+ * main pipeline.  Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed.  There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state to track global state information,
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display.  Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool.  Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context.  Represents the
+ * internal hardware pipeline components.  Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
+
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
@@ -175,6 +224,17 @@ failed_alloc:
        return false;
 }
 
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+       return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+       kfree(*perf_trace);
+       *perf_trace = NULL;
+}
+
 /**
  *****************************************************************************
  *  Function: dc_stream_adjust_vmin_vmax
@@ -240,7 +300,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
 }
 
 /**
- * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
  * @dc: DC Object
  * @stream: The stream to configure CRC on.
  * @enable: Enable CRC if true, disable otherwise.
@@ -292,7 +352,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
 }
 
 /**
- * dc_stream_get_crc: Get CRC values for the given stream.
+ * dc_stream_get_crc() - Get CRC values for the given stream.
  * @dc: DC object
  * @stream: The DC stream state of the stream to get CRCs from.
  * @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
@@ -536,6 +596,8 @@ static void destruct(struct dc *dc)
        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);
 
+       dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
        kfree(dc->ctx);
        dc->ctx = NULL;
 
@@ -659,6 +721,12 @@ static bool construct(struct dc *dc,
                goto fail;
        }
 
+       dc_ctx->perf_trace = dc_perf_trace_create();
+       if (!dc_ctx->perf_trace) {
+               ASSERT_CRITICAL(false);
+               goto fail;
+       }
+
        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_version,
@@ -1329,6 +1397,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
        return overall_type;
 }
 
+/**
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
 enum surface_update_type dc_check_update_surfaces_for_stream(
                struct dc *dc,
                struct dc_surface_update *updates,
@@ -1409,6 +1482,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
                        if (stream_update->output_csc_transform)
                                dc_stream_program_csc_matrix(dc, stream);
 
+                       if (stream_update->dither_option) {
+                               resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+                                                                       &pipe_ctx->stream->bit_depth_params);
+                               pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+                                               &stream->bit_depth_params,
+                                               &stream->clamping);
+                       }
+
                        /* Full fe update*/
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
@@ -1631,6 +1712,9 @@ enum dc_irq_source dc_interrupt_to_irq_source(
        return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
 }
 
+/**
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
 {
 
@@ -1724,6 +1808,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
        return true;
 }
 
+/**
+ * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
+ *
+ * EDID length is in bytes
+ */
 struct dc_sink *dc_link_add_remote_sink(
                struct dc_link *link,
                const uint8_t *edid,
@@ -1782,6 +1871,12 @@ fail_add_sink:
        return NULL;
 }
 
+/**
+ * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
+ *
+ * Note that this just removes the struct dc_sink - it doesn't
+ * program hardware or alter other members of dc_link
+ */
 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
 {
        int i;
index 948596a0239206c8edccef8d2396709868a96258..4dc5846de5c4b24f8d7f71d2f2c2bbe2b821d661 100644 (file)
@@ -198,6 +198,13 @@ static bool program_hpd_filter(
        return result;
 }
 
+/**
+ * dc_link_detect_sink() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ * Does not detect downstream devices, such as MST sinks
+ * or display connected through active dongles
+ */
 bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
        uint32_t is_hpd_high = 0;
@@ -324,9 +331,9 @@ static enum signal_type get_basic_signal_type(
        return SIGNAL_TYPE_NONE;
 }
 
-/*
- * @brief
- * Check whether there is a dongle on DP connector
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
  */
 bool dc_link_is_dp_sink_present(struct dc_link *link)
 {
@@ -593,6 +600,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
        return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
 }
 
+/**
+ * dc_link_detect() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks but will trigger DM
+ * to start MST detection if a branch is detected.
+ */
 bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 {
        struct dc_sink_init_data sink_init_data = { 0 };
index 4d1f8ac069c11c8ba4ee2d1509b2c361d546a025..849a3a3032f7cb71aea38dac834dc60461846998 100644 (file)
@@ -2196,7 +2196,7 @@ static void get_active_converter_info(
        }
 
        if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
-               uint8_t det_caps[4];
+               uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
                union dwnstream_port_caps_byte0 *port_caps =
                        (union dwnstream_port_caps_byte0 *)det_caps;
                core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
index 0bb844a7b9901a132e72eda966a96ad6299a6f1b..c347afd1030f8e15e44e55888f48cdbe06f6fd34 100644 (file)
@@ -1447,6 +1447,14 @@ static bool are_stream_backends_same(
        return true;
 }
 
+/**
+ * dc_is_stream_unchanged() - Compare two stream states for equivalence.
+ *
+ * Checks if there is a difference between the two states
+ * that would require a mode change.
+ *
+ * Does not compare cursor position or attributes.
+ */
 bool dc_is_stream_unchanged(
        struct dc_stream_state *old_stream, struct dc_stream_state *stream)
 {
@@ -1457,6 +1465,9 @@ bool dc_is_stream_unchanged(
        return true;
 }
 
+/**
+ * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
+ */
 bool dc_is_stream_scaling_unchanged(
        struct dc_stream_state *old_stream, struct dc_stream_state *stream)
 {
@@ -1616,6 +1627,9 @@ bool resource_is_stream_unchanged(
        return false;
 }
 
+/**
+ * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
+ */
 enum dc_status dc_add_stream_to_ctx(
                struct dc *dc,
                struct dc_state *new_ctx,
@@ -1640,6 +1654,9 @@ enum dc_status dc_add_stream_to_ctx(
        return res;
 }
 
+/**
+ * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
+ */
 enum dc_status dc_remove_stream_from_ctx(
                        struct dc *dc,
                        struct dc_state *new_ctx,
@@ -1860,6 +1877,12 @@ enum dc_status resource_map_pool_resources(
        return DC_ERROR_UNEXPECTED;
 }
 
+/**
+ * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
+ * Is a shallow copy.  Increments refcounts on existing streams and planes.
+ * @dc: copy out of dc->current_state
+ * @dst_ctx: copy into this
+ */
 void dc_resource_state_copy_construct_current(
                const struct dc *dc,
                struct dc_state *dst_ctx)
@@ -1875,6 +1898,14 @@ void dc_resource_state_construct(
        dst_ctx->dccg = dc->res_pool->clk_mgr;
 }
 
+/**
+ * dc_validate_global_state() - Determine if HW can support a given state
+ * Checks HW resource availability and bandwidth requirement.
+ * @dc: dc struct for this driver
+ * @new_ctx: state to be validated
+ *
+ * Return: DC_OK if the result can be programmed.  Otherwise, an error code.
+ */
 enum dc_status dc_validate_global_state(
                struct dc *dc,
                struct dc_state *new_ctx)
@@ -2202,113 +2233,15 @@ static void set_vendor_info_packet(
                struct dc_info_packet *info_packet,
                struct dc_stream_state *stream)
 {
-       uint32_t length = 0;
-       bool hdmi_vic_mode = false;
-       uint8_t checksum = 0;
-       uint32_t i = 0;
-       enum dc_timing_3d_format format;
-       // Can be different depending on packet content /*todo*/
-       // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
-
-       info_packet->valid = false;
-
-       format = stream->timing.timing_3d_format;
-       if (stream->view_format == VIEW_3D_FORMAT_NONE)
-               format = TIMING_3D_FORMAT_NONE;
-
-       /* Can be different depending on packet content */
-       length = 5;
-
-       if (stream->timing.hdmi_vic != 0
-                       && stream->timing.h_total >= 3840
-                       && stream->timing.v_total >= 2160)
-               hdmi_vic_mode = true;
-
-       /* According to HDMI 1.4a CTS, VSIF should be sent
-        * for both 3D stereo and HDMI VIC modes.
-        * For all other modes, there is no VSIF sent.  */
+       /* SPD info packet for FreeSync */
 
-       if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+       /* If the stream's VSP infopacket is not valid there is nothing to
+        * send; otherwise copy the whole packet into the output.
+        */
+       if (!stream->vsp_infopacket.valid)
                return;
 
-       /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
-       info_packet->sb[1] = 0x03;
-       info_packet->sb[2] = 0x0C;
-       info_packet->sb[3] = 0x00;
-
-       /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
-        * The value for HDMI_Video_Format are:
-        * 0x0 (0b000) - No additional HDMI video format is presented in this
-        * packet
-        * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
-        * parameter follows
-        * 0x2 (0b010) - 3D format indication present. 3D_Structure and
-        * potentially 3D_Ext_Data follows
-        * 0x3..0x7 (0b011..0b111) - reserved for future use */
-       if (format != TIMING_3D_FORMAT_NONE)
-               info_packet->sb[4] = (2 << 5);
-       else if (hdmi_vic_mode)
-               info_packet->sb[4] = (1 << 5);
-
-       /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
-        * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
-        * The value for 3D_Structure are:
-        * 0x0 - Frame Packing
-        * 0x1 - Field Alternative
-        * 0x2 - Line Alternative
-        * 0x3 - Side-by-Side (full)
-        * 0x4 - L + depth
-        * 0x5 - L + depth + graphics + graphics-depth
-        * 0x6 - Top-and-Bottom
-        * 0x7 - Reserved for future use
-        * 0x8 - Side-by-Side (Half)
-        * 0x9..0xE - Reserved for future use
-        * 0xF - Not used */
-       switch (format) {
-       case TIMING_3D_FORMAT_HW_FRAME_PACKING:
-       case TIMING_3D_FORMAT_SW_FRAME_PACKING:
-               info_packet->sb[5] = (0x0 << 4);
-               break;
-
-       case TIMING_3D_FORMAT_SIDE_BY_SIDE:
-       case TIMING_3D_FORMAT_SBS_SW_PACKED:
-               info_packet->sb[5] = (0x8 << 4);
-               length = 6;
-               break;
-
-       case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
-       case TIMING_3D_FORMAT_TB_SW_PACKED:
-               info_packet->sb[5] = (0x6 << 4);
-               break;
-
-       default:
-               break;
-       }
-
-       /*PB5: If PB4 is set to 0x1 (extended resolution format)
-        * fill PB5 with the correct HDMI VIC code */
-       if (hdmi_vic_mode)
-               info_packet->sb[5] = stream->timing.hdmi_vic;
-
-       /* Header */
-       info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
-       info_packet->hb1 = 0x01; /* Version */
-
-       /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
-       info_packet->hb2 = (uint8_t) (length);
-
-       /* Calculate checksum */
-       checksum = 0;
-       checksum += info_packet->hb0;
-       checksum += info_packet->hb1;
-       checksum += info_packet->hb2;
-
-       for (i = 1; i <= length; i++)
-               checksum += info_packet->sb[i];
-
-       info_packet->sb[0] = (uint8_t) (0x100 - checksum);
-
-       info_packet->valid = true;
+       *info_packet = stream->vsp_infopacket;
 }
 
 static void set_spd_info_packet(
@@ -2364,10 +2297,6 @@ void dc_resource_state_destruct(struct dc_state *context)
        }
 }
 
-/*
- * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
- * by the src_ctx
- */
 void dc_resource_state_copy_construct(
                const struct dc_state *src_ctx,
                struct dc_state *dst_ctx)
index 780838a05f44f6da9656bcfa9309cd828cd3dfa2..66e5c4623a49f8be5210ca782b6724728d3a7da4 100644 (file)
@@ -170,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status(
 }
 
 /**
- * Update the cursor attributes and set cursor surface address
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
  */
 bool dc_stream_set_cursor_attributes(
        struct dc_stream_state *stream,
index dea8bc39c688cdcda34d995d9c14e5f370b88985..4b5bbb13ce7fedcbd8d1d9d1b1be39c2478a9634 100644 (file)
@@ -39,7 +39,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.06"
+#define DC_VER "3.2.08"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
index 7825e4b5e97c4c888c59dc55a7ee42c0472fb01c..9ddfe4c6938b5a1f65e65eb4e196e9899f4ad008 100644 (file)
@@ -358,15 +358,16 @@ union dc_tiling_info {
        } gfx8;
 
        struct {
+               enum swizzle_mode_values swizzle;
                unsigned int num_pipes;
-               unsigned int num_banks;
+               unsigned int max_compressed_frags;
                unsigned int pipe_interleave;
+
+               unsigned int num_banks;
                unsigned int num_shader_engines;
                unsigned int num_rb_per_se;
-               unsigned int max_compressed_frags;
                bool shaderEnable;
 
-               enum swizzle_mode_values swizzle;
                bool meta_linear;
                bool rb_aligned;
                bool pipe_aligned;
index 8738f27a87088d26a0bca47ebc9087f986037117..29f19d57ff7abd1e132ba89e65caa5d4f6bb2161 100644 (file)
@@ -128,8 +128,10 @@ struct dc_link {
 
 const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
 
-/*
- * Return an enumerated dc_link.  dc_link order is constant and determined at
+/**
+ * dc_get_link_at_index() - Return an enumerated dc_link.
+ *
+ * dc_link order is constant and determined at
  * boot time.  They cannot be created or destroyed.
  * Use dc_get_caps() to get number of links.
  */
index 771d9f17e26ecb76a7cacb58fd93712198757f09..be34d638e15dfee01416e82e38d209d102403fde 100644 (file)
@@ -56,6 +56,7 @@ struct dc_stream_state {
        struct dc_crtc_timing_adjust adjust;
        struct dc_info_packet vrr_infopacket;
        struct dc_info_packet vsc_infopacket;
+       struct dc_info_packet vsp_infopacket;
 
        struct rect src; /* composition area */
        struct rect dst; /* stream addressable area */
@@ -129,11 +130,13 @@ struct dc_stream_update {
        struct dc_crtc_timing_adjust *adjust;
        struct dc_info_packet *vrr_infopacket;
        struct dc_info_packet *vsc_infopacket;
+       struct dc_info_packet *vsp_infopacket;
 
        bool *dpms_off;
 
        struct colorspace_transform *gamut_remap;
        enum dc_color_space *output_color_space;
+       enum dc_dither_option *dither_option;
 
        struct dc_csc_transform *output_csc_transform;
 
index 6e12d640d020940d887a66bf5d6c4037a7888735..0b20ae23f169241aae3368470a339ad32a5d7255 100644 (file)
@@ -73,10 +73,18 @@ struct hw_asic_id {
        void *atombios_base_address;
 };
 
+struct dc_perf_trace {
+       unsigned long read_count;
+       unsigned long write_count;
+       unsigned long last_entry_read;
+       unsigned long last_entry_write;
+};
+
 struct dc_context {
        struct dc *dc;
 
        void *driver_context; /* e.g. amdgpu_device */
+       struct dc_perf_trace *perf_trace;
        void *cgs_device;
 
        enum dce_environment dce_environment;
@@ -191,7 +199,6 @@ union display_content_support {
 };
 
 struct dc_panel_patch {
-       unsigned int disconnect_delay;
        unsigned int dppowerup_delay;
        unsigned int extra_t12_ms;
 };
index bc50a8e25f4f2d39ed1e0b3449511aa5e60c6460..87771676acacacccb303b3b504a0964172a8924d 100644 (file)
@@ -117,6 +117,18 @@ void dce100_prepare_bandwidth(
                        false);
 }
 
+void dce100_optimize_bandwidth(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+       dc->res_pool->clk_mgr->funcs->update_clocks(
+                       dc->res_pool->clk_mgr,
+                       context,
+                       true);
+}
+
 /**************************************************************************/
 
 void dce100_hw_sequencer_construct(struct dc *dc)
@@ -125,6 +137,6 @@ void dce100_hw_sequencer_construct(struct dc *dc)
 
        dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
        dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
-       dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+       dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
 }
 
index 1f7f25013217dfad9226ed55a98aee0461b500a0..52d50e24a99538ac04cafaf58b1446ab71cc5385 100644 (file)
@@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
 
 static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
 
-enum fbc_idle_force {
-       /* Bit 0 - Display registers updated */
-       FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
-       /* Bit 2 - FBC_GRPH_COMP_EN register updated */
-       FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
-       /* Bit 3 - FBC_SRC_SEL register updated */
-       FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
-       /* Bit 4 - FBC_MIN_COMPRESSION register updated */
-       FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
-       /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
-       FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
-       /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
-       FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
-       /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
-       FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
-       /* Bit 24 - Memory write to region 0 defined by MC registers. */
-       FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
-       /* Bit 25 - Memory write to region 1 defined by MC registers */
-       FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
-       /* Bit 26 - Memory write to region 2 defined by MC registers */
-       FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
-       /* Bit 27 - Memory write to region 3 defined by MC registers. */
-       FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
-       /* Bit 28 - Memory write from any client other than MCIF */
-       FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
-       /* Bit 29 - CG statics screen signal is inactive */
-       FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-
 static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
 {
        return 256 * ((pixels + 255) / 256);
 }
 
-static void reset_lb_on_vblank(struct dc_context *ctx)
+static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
 {
-       uint32_t value, frame_count;
+       uint32_t value;
+       uint32_t frame_count;
+       uint32_t status_pos;
        uint32_t retry = 0;
-       uint32_t status_pos =
-                       dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+       struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+       cp110->offsets = reg_offsets[crtc_inst];
+
+       status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));
 
 
        /* Only if CRTC is enabled and counter is moving we wait for one frame. */
-       if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+       if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
                /* Resetting LB on VBlank */
-               value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+               value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
                set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
                set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
-               dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+               dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
 
-               frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+               frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));
 
 
                for (retry = 10000; retry > 0; retry--) {
-                       if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+                       if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
                                break;
                        udelay(10);
                }
@@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
                        dm_error("Frame count did not increase for 100ms.\n");
 
                /* Resetting LB on VBlank */
-               value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+               value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
                set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
                set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
-               dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
-
+               dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
        }
-
 }
 
 static void wait_for_fbc_state_changed(
@@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc(
                uint32_t addr;
                uint32_t value, misc_value;
 
-
                addr = mmFBC_CNTL;
                value = dm_read_reg(compressor->ctx, addr);
                set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+               /* params->inst is valid HW CRTC instance start from 0 */
                set_reg_field_value(
                        value,
                        params->inst,
@@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc(
 
                /* Keep track of enum controller_id FBC is attached to */
                compressor->is_enabled = true;
-               compressor->attached_inst = params->inst;
-               cp110->offsets = reg_offsets[params->inst];
+               /* attached_inst is SW CRTC instance start from 1
+                * 0 = CONTROLLER_ID_UNDEFINED means not attached crtc
+                */
+               compressor->attached_inst = params->inst + CONTROLLER_ID_D0;
 
                /* Toggle it as there is bug in HW */
                set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
@@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc(
 void dce110_compressor_disable_fbc(struct compressor *compressor)
 {
        struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+       uint32_t crtc_inst = 0;
 
        if (compressor->options.bits.FBC_SUPPORT) {
-               if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+               if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
                        uint32_t reg_data;
                        /* Turn off compression */
                        reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
@@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
                        wait_for_fbc_state_changed(cp110, false);
                }
 
-               /* Sync line buffer  - dce100/110 only*/
-               reset_lb_on_vblank(compressor->ctx);
+               /* Sync the line buffer that FBC was attached to; dce100/110 only */
+               if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
+                       reset_lb_on_vblank(compressor,
+                                       crtc_inst - CONTROLLER_ID_D0);
        }
 }
 
@@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
        uint32_t compressed_surf_address_low_part =
                compressor->compr_surface_address.addr.low_part;
 
+       cp110->offsets = reg_offsets[params->inst];
+
        /* Clear content first. */
        dm_write_reg(
                compressor->ctx,
@@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers(
        value = dm_read_reg(compressor->ctx, addr);
        set_reg_field_value(
                value,
-               fbc_trigger |
-               FBC_IDLE_FORCE_GRPH_COMP_EN |
-               FBC_IDLE_FORCE_SRC_SEL_CHANGE |
-               FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
-               FBC_IDLE_FORCE_ALPHA_COMP_EN |
-               FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
-               FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+               fbc_trigger,
                FBC_IDLE_FORCE_CLEAR_MASK,
                FBC_IDLE_FORCE_CLEAR_MASK);
        dm_write_reg(compressor->ctx, addr, value);
@@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
        compressor->base.channel_interleave_size = 0;
        compressor->base.dram_channels_num = 0;
        compressor->base.lpt_channels_num = 0;
-       compressor->base.attached_inst = 0;
+       compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
        compressor->base.is_enabled = false;
        compressor->base.funcs = &dce110_compressor_funcs;
 
index 2f062bacd78a6d08c1b4d82e95f5b5e30845d7ad..6349ba7bec7c3bc317af484e2cf84c76f24ec29a 100644 (file)
@@ -1766,12 +1766,13 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
  *  Check if FBC can be enabled
  */
 static bool should_enable_fbc(struct dc *dc,
-                             struct dc_state *context,
-                             uint32_t *pipe_idx)
+               struct dc_state *context,
+               uint32_t *pipe_idx)
 {
        uint32_t i;
        struct pipe_ctx *pipe_ctx = NULL;
        struct resource_context *res_ctx = &context->res_ctx;
+       unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
 
 
        ASSERT(dc->fbc_compressor);
@@ -1786,14 +1787,28 @@ static bool should_enable_fbc(struct dc *dc,
 
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (res_ctx->pipe_ctx[i].stream) {
+
                        pipe_ctx = &res_ctx->pipe_ctx[i];
-                       *pipe_idx = i;
-                       break;
+
+                       if (!pipe_ctx)
+                               continue;
+
+                       /* fbc not applicable on underlay pipe */
+                       if (pipe_ctx->pipe_idx != underlay_idx) {
+                               *pipe_idx = i;
+                               break;
+                       }
                }
        }
 
-       /* Pipe context should be found */
-       ASSERT(pipe_ctx);
+       if (i == dc->res_pool->pipe_count)
+               return false;
+
+       if (!pipe_ctx->stream->sink)
+               return false;
+
+       if (!pipe_ctx->stream->sink->link)
+               return false;
 
        /* Only supports eDP */
        if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
@@ -1817,8 +1832,9 @@ static bool should_enable_fbc(struct dc *dc,
 /*
  *  Enable FBC
  */
-static void enable_fbc(struct dc *dc,
-                      struct dc_state *context)
+static void enable_fbc(
+               struct dc *dc,
+               struct dc_state *context)
 {
        uint32_t pipe_idx = 0;
 
@@ -1828,10 +1844,9 @@ static void enable_fbc(struct dc *dc,
                struct compressor *compr = dc->fbc_compressor;
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
 
-
                params.source_view_width = pipe_ctx->stream->timing.h_addressable;
                params.source_view_height = pipe_ctx->stream->timing.v_addressable;
-
+               params.inst = pipe_ctx->stream_res.tg->inst;
                compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
 
                compr->funcs->surface_address_and_pitch(compr, &params);
@@ -2046,10 +2061,10 @@ enum dc_status dce110_apply_ctx_to_hw(
                        return status;
        }
 
-       dcb->funcs->set_scratch_critical_state(dcb, false);
-
        if (dc->fbc_compressor)
-               enable_fbc(dc, context);
+               enable_fbc(dc, dc->current_state);
+
+       dcb->funcs->set_scratch_critical_state(dcb, false);
 
        return DC_OK;
 }
@@ -2408,7 +2423,6 @@ static void dce110_program_front_end_for_pipe(
        struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct xfm_grph_csc_adjustment adjust;
        struct out_csc_color_matrix tbl_entry;
-       unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
        unsigned int i;
        DC_LOGGER_INIT();
        memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2449,15 +2463,6 @@ static void dce110_program_front_end_for_pipe(
 
        program_scaler(dc, pipe_ctx);
 
-       /* fbc not applicable on Underlay pipe */
-       if (dc->fbc_compressor && old_pipe->stream &&
-           pipe_ctx->pipe_idx != underlay_idx) {
-               if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
-                       dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
-               else
-                       enable_fbc(dc, dc->current_state);
-       }
-
        mi->funcs->mem_input_program_surface_config(
                        mi,
                        plane_state->format,
@@ -2534,6 +2539,9 @@ static void dce110_apply_ctx_for_surface(
        if (num_planes == 0)
                return;
 
+       if (dc->fbc_compressor)
+               dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2576,6 +2584,9 @@ static void dce110_apply_ctx_for_surface(
                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                        dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
        }
+
+       if (dc->fbc_compressor)
+               enable_fbc(dc, dc->current_state);
 }
 
 static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
index f9d7d2c26cc2218cf49aa663d2820df6e9524f11..54abedbf1b43f7f2946ec574970d0654b037d169 100644 (file)
@@ -328,12 +328,10 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
 
        *smu_req_cur = smu_req;
 }
-
 static const struct clk_mgr_funcs dcn1_funcs = {
        .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
        .update_clocks = dcn1_update_clocks
 };
-
 struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
 {
        struct dc_debug_options *debug = &ctx->dc->debug;
@@ -373,3 +371,5 @@ struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
 
        return &clk_mgr_dce->base;
 }
+
+
index 9dbaf65780066902ca2968c805b5204ff88c71e0..a995eda443a3ff927183960bc3568e01bbb5bbec 100644 (file)
 
 #include "../dce/dce_clk_mgr.h"
 
+struct clk_bypass {
+       uint32_t dcfclk_bypass;
+       uint32_t dispclk_bypass;
+       uint32_t dprefclk_bypass;
+};
+
 void dcn1_pplib_apply_display_requirements(
        struct dc *dc,
        struct dc_state *context);
index 3eea44092a04949eb3ae8deaf8ee41b5e472b6a4..7469333a2c8a5166800e9db7b830c20cbc788818 100644 (file)
@@ -324,7 +324,7 @@ bool cm_helper_translate_curve_to_hw_format(
        if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
                return false;
 
-       PERF_TRACE();
+       PERF_TRACE_CTX(output_tf->ctx);
 
        corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
@@ -513,7 +513,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
        if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
                return false;
 
-       PERF_TRACE();
+       PERF_TRACE_CTX(output_tf->ctx);
 
        corner_points = lut_params->corner_points;
        rgb_resulted = lut_params->rgb_resulted;
index 211bb240a7203b530228d7fc2389b79f24d42c44..cd469014baa39f2ce505c7006f09a44e6e6ee9f5 100644 (file)
@@ -44,6 +44,7 @@
 #include "dcn10_hubp.h"
 #include "dcn10_hubbub.h"
 #include "dcn10_cm_common.h"
+#include "dcn10_clk_mgr.h"
 
 static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
 {
@@ -463,19 +464,22 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
 static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
 {
        unsigned int chars_printed = 0;
+       unsigned int remaining_buffer = bufSize;
 
-       chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
-               "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
-               "%d,%d,%d,%d,%d,%d,%d\n",
+       chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
+               "dppclk,fclk,socclk\n"
+               "%d,%d,%d,%d,%d,%d\n",
                dc->current_state->bw.dcn.clk.dcfclk_khz,
                dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
                dc->current_state->bw.dcn.clk.dispclk_khz,
                dc->current_state->bw.dcn.clk.dppclk_khz,
-               dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
                dc->current_state->bw.dcn.clk.fclk_khz,
                dc->current_state->bw.dcn.clk.socclk_khz);
 
-       return chars_printed;
+       remaining_buffer -= chars_printed;
+       pBuf += chars_printed;
+
+       return bufSize - remaining_buffer;
 }
 
 static void dcn10_clear_otpc_underflow(struct dc *dc)
@@ -538,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
         *  Bit 0 - 15: Hardware block mask
         *  Bit 15: 1 = Invariant Only, 0 = All
         */
-       const unsigned int DC_HW_STATE_MASK_HUBBUB      = 0x1;
-       const unsigned int DC_HW_STATE_MASK_HUBP        = 0x2;
-       const unsigned int DC_HW_STATE_MASK_RQ          = 0x4;
-       const unsigned int DC_HW_STATE_MASK_DLG         = 0x8;
-       const unsigned int DC_HW_STATE_MASK_TTU         = 0x10;
-       const unsigned int DC_HW_STATE_MASK_CM          = 0x20;
-       const unsigned int DC_HW_STATE_MASK_MPCC        = 0x40;
-       const unsigned int DC_HW_STATE_MASK_OTG         = 0x80;
-       const unsigned int DC_HW_STATE_MASK_CLOCKS      = 0x100;
-       const unsigned int DC_HW_STATE_INVAR_ONLY       = 0x8000;
+       const unsigned int DC_HW_STATE_MASK_HUBBUB                      = 0x1;
+       const unsigned int DC_HW_STATE_MASK_HUBP                        = 0x2;
+       const unsigned int DC_HW_STATE_MASK_RQ                          = 0x4;
+       const unsigned int DC_HW_STATE_MASK_DLG                         = 0x8;
+       const unsigned int DC_HW_STATE_MASK_TTU                         = 0x10;
+       const unsigned int DC_HW_STATE_MASK_CM                          = 0x20;
+       const unsigned int DC_HW_STATE_MASK_MPCC                        = 0x40;
+       const unsigned int DC_HW_STATE_MASK_OTG                         = 0x80;
+       const unsigned int DC_HW_STATE_MASK_CLOCKS                      = 0x100;
+       const unsigned int DC_HW_STATE_INVAR_ONLY                       = 0x8000;
 
        unsigned int chars_printed = 0;
        unsigned int remaining_buf_size = bufSize;
@@ -603,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
                remaining_buf_size -= chars_printed;
        }
 
-       if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
+       if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
                chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
+               pBuf += chars_printed;
+               remaining_buf_size -= chars_printed;
+       }
 }
index 47dbe4bb294aeb7218fa2092e2e03065889182de..5d4772dec0ba5454267b6fcfeae1b55d6b6cf049 100644 (file)
@@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id {
 #define MMHUB_SR(reg_name)\
                .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) +  \
                                        mm ## reg_name
-
 /* macros to expend register list macro defined in HW object header file
  * end *********************/
 
@@ -436,7 +435,6 @@ static const struct dcn_optc_mask tg_mask = {
        TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
 };
 
-
 static const struct bios_registers bios_regs = {
                NBIO_SR(BIOS_SCRATCH_0),
                NBIO_SR(BIOS_SCRATCH_3),
@@ -497,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = {
                CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
 };
 
-
 static const struct resource_caps res_cap = {
                .num_timing_generator = 4,
                .num_opp = 4,
@@ -1277,7 +1274,6 @@ static bool construct(
                        goto fail;
                }
        }
-
        pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
        if (pool->base.clk_mgr == NULL) {
                dm_error("DC: failed to create display clock!\n");
index 34a701ca879e725a3993b679e9bbeb362044f491..65663f4d93e10ad3e7905efbdb70b275f27aaf5b 100644 (file)
@@ -33,6 +33,7 @@
 
 #define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
 #define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+#define EVENT_LOG_CUST_MSG(tag, a, ...)
 
 #endif
 
index beb08fd12b1d20cde10bcd44994b34348bb67f3c..0029a39efb1cb5ffe09e6d775af18cfcdfbfd2be 100644 (file)
@@ -102,7 +102,7 @@ struct pp_smu_funcs_rv {
         */
        void (*set_display_count)(struct pp_smu *pp, int count);
 
-       /* which SMU message?  are reader and writer WM separate SMU msg? */
+       /* reader and writer WM's are sent together as part of one table */
        /*
         * PPSMC_MSG_SetDriverDramAddrHigh
         * PPSMC_MSG_SetDriverDramAddrLow
index 28128c02de00417d4d1ca6b5c43a87f4649ca01c..1961cc6d91439bafbfdfec845ffcdd650c13bc58 100644 (file)
@@ -31,6 +31,8 @@
 
 #define __DM_SERVICES_H__
 
+#include "amdgpu_dm_trace.h"
+
 /* TODO: remove when DC is complete. */
 #include "dm_services_types.h"
 #include "logger_interface.h"
@@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func(
        }
 #endif
        value = cgs_read_register(ctx->cgs_device, address);
+       trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
 
        return value;
 }
@@ -90,6 +93,7 @@ static inline void dm_write_reg_func(
        }
 #endif
        cgs_write_register(ctx->cgs_device, address, value);
+       trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
 }
 
 static inline uint32_t dm_read_index_reg(
@@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
 /*
  * performance tracing
  */
-void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
-#define PERF_TRACE()   dm_perf_trace_timestamp(__func__, __LINE__)
+#define PERF_TRACE()   trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
+               CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
+               &CTX->perf_trace->last_entry_write, __func__, __LINE__)
+#define PERF_TRACE_CTX(__CTX)  trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
+               __CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
+               &__CTX->perf_trace->last_entry_write, __func__, __LINE__)
 
 
 /*
index f20161c5706d7278cc0c48f92eedebf0bdf0ce73..dada0429602585837cd9566ffe5262e3f2dcf86d 100644 (file)
@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
        struct dc_context *ctx)
 {
        struct gpio_service *service;
-
        uint32_t index_of_id;
 
        service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create(
                goto failure_1;
        }
 
-       /* allocate and initialize business storage */
+       /* allocate and initialize busyness storage */
        {
-               const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
                index_of_id = 0;
                service->ctx = ctx;
 
                do {
                        uint32_t number_of_bits =
                                service->factory.number_of_pins[index_of_id];
+                       uint32_t i = 0;
 
-                       uint32_t number_of_uints =
-                               (number_of_bits + bits_per_uint - 1) /
-                               bits_per_uint;
-
-                       uint32_t *slot;
-
-                       if (number_of_bits) {
-                               uint32_t index_of_uint = 0;
+                       if (number_of_bits)  {
+                               service->busyness[index_of_id] =
+                                       kcalloc(number_of_bits, sizeof(char),
+                                               GFP_KERNEL);
 
-                               slot = kcalloc(number_of_uints,
-                                              sizeof(uint32_t),
-                                              GFP_KERNEL);
-
-                               if (!slot) {
+                               if (!service->busyness[index_of_id]) {
                                        BREAK_TO_DEBUGGER();
                                        goto failure_2;
                                }
 
                                do {
-                                       slot[index_of_uint] = 0;
-
-                                       ++index_of_uint;
-                               } while (index_of_uint < number_of_uints);
-                       } else
-                               slot = NULL;
-
-                       service->busyness[index_of_id] = slot;
+                                       service->busyness[index_of_id][i] = 0;
+                                       ++i;
+                               } while (i < number_of_bits);
+                       } else {
+                               service->busyness[index_of_id] = NULL;
+                       }
 
                        ++index_of_id;
                } while (index_of_id < GPIO_ID_COUNT);
@@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create(
 
 failure_2:
        while (index_of_id) {
-               uint32_t *slot;
-
                --index_of_id;
-
-               slot = service->busyness[index_of_id];
-
-               kfree(slot);
+               kfree(service->busyness[index_of_id]);
        }
 
 failure_1:
@@ -169,9 +152,7 @@ void dal_gpio_service_destroy(
                uint32_t index_of_id = 0;
 
                do {
-                       uint32_t *slot = (*ptr)->busyness[index_of_id];
-
-                       kfree(slot);
+                       kfree((*ptr)->busyness[index_of_id]);
 
                        ++index_of_id;
                } while (index_of_id < GPIO_ID_COUNT);
@@ -192,11 +173,7 @@ static bool is_pin_busy(
        enum gpio_id id,
        uint32_t en)
 {
-       const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
-       const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
-
-       return 0 != (*slot & (1 << (en % bits_per_uint)));
+       return service->busyness[id][en];
 }
 
 static void set_pin_busy(
@@ -204,10 +181,7 @@ static void set_pin_busy(
        enum gpio_id id,
        uint32_t en)
 {
-       const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
-       service->busyness[id][en / bits_per_uint] |=
-               (1 << (en % bits_per_uint));
+       service->busyness[id][en] = true;
 }
 
 static void set_pin_free(
@@ -215,10 +189,7 @@ static void set_pin_free(
        enum gpio_id id,
        uint32_t en)
 {
-       const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
-       service->busyness[id][en / bits_per_uint] &=
-               ~(1 << (en % bits_per_uint));
+       service->busyness[id][en] = false;
 }
 
 enum gpio_result dal_gpio_service_open(
index c7f3081f59ccf470b3759bf59f538cabf67b9711..1d501a43d13b44505f2a7dc08868b82cfbb01f28 100644 (file)
@@ -36,10 +36,9 @@ struct gpio_service {
        /*
         * @brief
         * Business storage.
-        * For each member of 'enum gpio_id',
-        * store array of bits (packed into uint32_t slots),
-        * index individual bit by 'en' value */
-       uint32_t *busyness[GPIO_ID_COUNT];
+        * one byte For each member of 'enum gpio_id'
+        */
+       char *busyness[GPIO_ID_COUNT];
 };
 
 enum gpio_result dal_gpio_service_open(
index bcb18f5e1e60246f6bee825a3e028bbad8561fa2..7a147a9762a01425a42a64dc75cd91580eaf5f55 100644 (file)
@@ -77,6 +77,7 @@ struct compressor_funcs {
 };
 struct compressor {
        struct dc_context *ctx;
+       /* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */
        uint32_t attached_inst;
        bool is_enabled;
        const struct compressor_funcs *funcs;
index e3ee96afa60e8b0d11c6422a4589cd0306d5461c..b168a5e9dd9dcef7fa9c0ae2e95b05505300e938 100644 (file)
@@ -272,6 +272,17 @@ union bw_context {
        struct dce_bw_output dce;
 };
 
+/**
+ * struct dc_state - The full description of a state requested by a user
+ *
+ * @streams: Stream properties
+ * @stream_status: The planes on a given stream
+ * @res_ctx: Persistent state of resources
+ * @bw: The output from bandwidth and watermark calculations
+ * @pp_display_cfg: PowerPlay clocks and settings
+ * @dcn_bw_vars: non-stack memory to support bandwidth calculations
+ *
+ */
 struct dc_state {
        struct dc_stream_state *streams[MAX_PIPES];
        struct dc_stream_status stream_status[MAX_PIPES];
@@ -279,7 +290,6 @@ struct dc_state {
 
        struct resource_context res_ctx;
 
-       /* The output from BW and WM calculations. */
        union bw_context bw;
 
        /* Note: these are big structures, do *not* put on stack! */
index bbecbaefb741c47026e75a020bb5ec30fb44aa8a..479b77c2e89e222467753209ebf40524a07bd7c1 100644 (file)
@@ -1761,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
        struct pwl_float_data *rgb_user = NULL;
        struct pwl_float_data_ex *curve = NULL;
-       struct gamma_pixel *axix_x = NULL;
+       struct gamma_pixel *axis_x = NULL;
        struct pixel_gamma_point *coeff = NULL;
        enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
        bool ret = false;
@@ -1787,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
                         GFP_KERNEL);
        if (!curve)
                goto curve_alloc_fail;
-       axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
+       axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
                          GFP_KERNEL);
-       if (!axix_x)
-               goto axix_x_alloc_fail;
+       if (!axis_x)
+               goto axis_x_alloc_fail;
        coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
                         GFP_KERNEL);
        if (!coeff)
@@ -1803,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
        tf = input_tf->tf;
 
        build_evenly_distributed_points(
-                       axix_x,
+                       axis_x,
                        ramp->num_entries,
                        dividers);
 
@@ -1828,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
        tf_pts->x_point_at_y1_blue = 1;
 
        map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
-                       coordinates_x, axix_x, curve,
+                       coordinates_x, axis_x, curve,
                        MAX_HW_POINTS, tf_pts,
                        mapUserRamp && ramp->type != GAMMA_CUSTOM);
        if (ramp->type == GAMMA_CUSTOM)
@@ -1838,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
        kvfree(coeff);
 coeff_alloc_fail:
-       kvfree(axix_x);
-axix_x_alloc_fail:
+       kvfree(axis_x);
+axis_x_alloc_fail:
        kvfree(curve);
 curve_alloc_fail:
        kvfree(rgb_user);
index 620a171620ee8dbb4bf77eedb3da5eb8937973ff..1544ed3f17473cb80c0009c338bccd60f245a52c 100644 (file)
@@ -608,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
 static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
                struct dc_info_packet *infopacket)
 {
-       if (app_tf != transfer_func_unknown) {
+       if (app_tf != TRANSFER_FUNC_UNKNOWN) {
                infopacket->valid = true;
 
                infopacket->sb[6] |= 0x08;  // PB6 = [Bit 3 = Native Color Active]
 
-               if (app_tf == transfer_func_gamma_22) {
+               if (app_tf == TRANSFER_FUNC_GAMMA_22) {
                        infopacket->sb[9] |= 0x04;  // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
                }
        }
@@ -688,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
                return;
 
        switch (packet_type) {
-       case packet_type_fs2:
+       case PACKET_TYPE_FS2:
                build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
                break;
-       case packet_type_vrr:
-       case packet_type_fs1:
+       case PACKET_TYPE_VRR:
+       case PACKET_TYPE_FS1:
        default:
                build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
        }
index 786b34380f852eff3fc456eed8f05ed3154b60d8..5b1c9a4c764302e57d27830721f3c3bc6072357d 100644 (file)
 #ifndef MOD_INFO_PACKET_H_
 #define MOD_INFO_PACKET_H_
 
-struct info_packet_inputs {
-       const struct dc_stream_state *pStream;
-};
+#include "mod_shared.h"
 
-struct info_packets {
-       struct dc_info_packet *pVscInfoPacket;
-};
+//Forward Declarations
+struct dc_stream_state;
+struct dc_info_packet;
 
-void mod_build_infopackets(struct info_packet_inputs *inputs,
-               struct info_packets *info_packets);
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+               struct dc_info_packet *info_packet);
 
 #endif
index 238c431ae4834c3fcb0cc27f935b7179c6fcb489..1bd02c0ac30c3f0bde291918146d60a7e7ecfcba 100644 (file)
  *
  */
 
-
 #ifndef MOD_SHARED_H_
 #define MOD_SHARED_H_
 
 enum color_transfer_func {
-       transfer_func_unknown,
-       transfer_func_srgb,
-       transfer_func_bt709,
-       transfer_func_pq2084,
-       transfer_func_pq2084_interim,
-       transfer_func_linear_0_1,
-       transfer_func_linear_0_125,
-       transfer_func_dolbyvision,
-       transfer_func_gamma_22,
-       transfer_func_gamma_26
+       TRANSFER_FUNC_UNKNOWN,
+       TRANSFER_FUNC_SRGB,
+       TRANSFER_FUNC_BT709,
+       TRANSFER_FUNC_PQ2084,
+       TRANSFER_FUNC_PQ2084_INTERIM,
+       TRANSFER_FUNC_LINEAR_0_1,
+       TRANSFER_FUNC_LINEAR_0_125,
+       TRANSFER_FUNC_GAMMA_22,
+       TRANSFER_FUNC_GAMMA_26
 };
 
 enum vrr_packet_type {
-       packet_type_vrr,
-       packet_type_fs1,
-       packet_type_fs2
+       PACKET_TYPE_VRR,
+       PACKET_TYPE_FS1,
+       PACKET_TYPE_FS2
 };
 
+
 #endif /* MOD_SHARED_H_ */
index ff8bfb9b43b0c944743fa5f9b956e430ad57bef6..db06fab2ad5cd06c8c9834429c841e4281fffb5a 100644 (file)
 
 #include "mod_info_packet.h"
 #include "core_types.h"
+#include "dc_types.h"
+#include "mod_shared.h"
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
 
 enum ColorimetryRGBDP {
        ColorimetryRGB_DP_sRGB               = 0,
@@ -41,7 +45,7 @@ enum ColorimetryYCCDP {
        ColorimetryYCC_DP_ITU2020YCbCr  = 7,
 };
 
-static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
                struct dc_info_packet *info_packet)
 {
        unsigned int vscPacketRevision = 0;
@@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
         *   DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
         *   (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
         *   the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
-        *   MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).)
+        *   MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
         */
        if (vscPacketRevision == 0x5) {
                /* Secondary-data Packet ID = 0 */
@@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
 
 }
 
-void mod_build_infopackets(struct info_packet_inputs *inputs,
-               struct info_packets *info_packets)
-{
-       if (info_packets->pVscInfoPacket != NULL)
-               mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
-}
-
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
new file mode 100644 (file)
index 0000000..8f51587
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_OFFSET_HEADER
+#define _mmhub_9_4_0_OFFSET_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL                                                                          0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX                                                                 0
+#define mmMC_VM_XGMI_LFB_SIZE                                                                          0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX                                                                 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
new file mode 100644 (file)
index 0000000..0a6b072
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_SH_MASK_HEADER
+#define _mmhub_9_4_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT                                                             0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT                                                             0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK                                                               0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK                                                               0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT                                                               0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK                                                                 0x0000FFFFL
+
+#endif
index 58ac0b90c310e721ea68ba2eae2b04473cf56e90..8154d67388ccef99185431ce8d04b7b16af59762 100644 (file)
@@ -188,8 +188,8 @@ struct tile_config {
  */
 #define ALLOC_MEM_FLAGS_VRAM           (1 << 0)
 #define ALLOC_MEM_FLAGS_GTT            (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR                (1 << 2) /* TODO */
-#define ALLOC_MEM_FLAGS_DOORBELL       (1 << 3) /* TODO */
+#define ALLOC_MEM_FLAGS_USERPTR                (1 << 2)
+#define ALLOC_MEM_FLAGS_DOORBELL       (1 << 3)
 
 /*
  * Allocation flags attributes/access options.
index 980e696989b13444b31e25c5debafb1e571f9323..1479ea1dc3e7166fe96c7e759bf06c33aaba2683 100644 (file)
@@ -276,6 +276,10 @@ struct amd_pm_funcs {
                struct amd_pp_simple_clock_info *clocks);
        int (*notify_smu_enable_pwe)(void *handle);
        int (*enable_mgpu_fan_boost)(void *handle);
+       int (*set_active_display_count)(void *handle, uint32_t count);
+       int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
+       int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
+       int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
 };
 
 #endif
index b68c2e0fef018d51b60a74596b89ec1441dd4cd1..9bc27f468d5be578fe4f014d1439698703eb4972 100644 (file)
@@ -725,7 +725,7 @@ static int pp_dpm_force_clock_level(void *handle,
        }
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-               pr_info("force clock level is for dpm manual mode only.\n");
+               pr_debug("force clock level is for dpm manual mode only.\n");
                return -EINVAL;
        }
 
@@ -899,7 +899,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
        }
 
        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
-               pr_info("power profile setting is for manual dpm mode only.\n");
+               pr_debug("power profile setting is for manual dpm mode only.\n");
                return ret;
        }
 
@@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle,
                                        &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
 
        if (ret) {
-               pr_info("Error in phm_get_clock_info \n");
+               pr_debug("Error in phm_get_clock_info \n");
                mutex_unlock(&hwmgr->smu_lock);
                return -EINVAL;
        }
@@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle)
        return 0;
 }
 
+static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
+               pr_debug("%s was not implemented.\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&hwmgr->smu_lock);
+       hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+       mutex_unlock(&hwmgr->smu_lock);
+
+       return 0;
+}
+
+static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
+               pr_debug("%s was not implemented.\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&hwmgr->smu_lock);
+       hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+       mutex_unlock(&hwmgr->smu_lock);
+
+       return 0;
+}
+
+static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
+               pr_debug("%s was not implemented.\n", __func__);
+               return -EINVAL;
+       }
+
+       mutex_lock(&hwmgr->smu_lock);
+       hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+       mutex_unlock(&hwmgr->smu_lock);
+
+       return 0;
+}
+
+static int pp_set_active_display_count(void *handle, uint32_t count)
+{
+       struct pp_hwmgr *hwmgr = handle;
+       int ret = 0;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       mutex_lock(&hwmgr->smu_lock);
+       ret = phm_set_active_display_count(hwmgr, count);
+       mutex_unlock(&hwmgr->smu_lock);
+
+       return ret;
+}
+
 static const struct amd_pm_funcs pp_dpm_funcs = {
        .load_firmware = pp_dpm_load_fw,
        .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
        .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
        .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+       .set_active_display_count = pp_set_active_display_count,
+       .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
+       .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
+       .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
 };
index 85119c2bdcc8ff2e2bbd54b53e204b095decb2d4..333b9b8459715f0e58f7e55a0123fc120e2a600e 100644 (file)
@@ -286,8 +286,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
        if (display_config == NULL)
                return -EINVAL;
 
-       if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
-               hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
+       if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+               hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
 
        for (index = 0; index < display_config->num_path_including_non_display; index++) {
                if (display_config->displays[index].controller_id != 0)
@@ -478,3 +478,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
 
        return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
 }
+
+int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+       PHM_FUNC_CHECK(hwmgr);
+
+       if (!hwmgr->hwmgr_func->set_active_display_count)
+               return -EINVAL;
+
+       return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
+}
+
+int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       PHM_FUNC_CHECK(hwmgr);
+
+       if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+               return -EINVAL;
+
+       return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+}
+
+int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       PHM_FUNC_CHECK(hwmgr);
+
+       if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
+               return -EINVAL;
+
+       return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+}
+
+int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       PHM_FUNC_CHECK(hwmgr);
+
+       if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
+               return -EINVAL;
+
+       return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+}
+
index dd18cb710391a1f2caa304f81f487743bc5a2db4..f95c5f50eb0f0cfda70912aaa6d1805d93306976 100644 (file)
@@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock)
        return (clock + 99) / 100;
 }
 
-static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
 {
        struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
 
        if (smu10_data->need_min_deep_sleep_dcefclk &&
-           smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
+               smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
                smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
        return 0;
 }
 
+static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+       if (smu10_data->dcf_actual_hard_min_freq &&
+               smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+               smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinDcefclkByFreq,
+                                       smu10_data->dcf_actual_hard_min_freq);
+       }
+       return 0;
+}
+
+static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+       if (smu10_data->f_actual_hard_min_freq &&
+               smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+               smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinFclkByFreq,
+                                       smu10_data->f_actual_hard_min_freq);
+       }
+       return 0;
+}
+
 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
 {
        struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .get_max_high_clocks = smu10_get_max_high_clocks,
        .read_sensor = smu10_read_sensor,
        .set_active_display_count = smu10_set_active_display_count,
-       .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+       .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
        .dynamic_state_management_enable = smu10_enable_dpm_tasks,
        .power_off_asic = smu10_power_off_asic,
        .asic_setup = smu10_setup_asic_task,
@@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .display_clock_voltage_request = smu10_display_clock_voltage_request,
        .powergate_gfx = smu10_gfx_off_control,
        .powergate_sdma = smu10_powergate_sdma,
+       .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
+       .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
index 5dcd21d29dbf682bcb69001539e61787291c009a..3958729d626523db7907e230d460cf7143730037 100644 (file)
@@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
        case CHIP_POLARIS12:
-               switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+               if (hwmgr->is_kicker)
+                       switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+               else
+                       switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
                break;
        case CHIP_VEGAM:
                switch_limit_us = 30;
@@ -4219,9 +4222,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
        if (tmp & (1 << 23)) {
                data->mem_latency_high = MEM_LATENCY_HIGH;
                data->mem_latency_low = MEM_LATENCY_LOW;
+               if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+                   (hwmgr->chip_id == CHIP_POLARIS11) ||
+                   (hwmgr->chip_id == CHIP_POLARIS12))
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
        } else {
                data->mem_latency_high = 330;
                data->mem_latency_low = 330;
+               if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+                   (hwmgr->chip_id == CHIP_POLARIS11) ||
+                   (hwmgr->chip_id == CHIP_POLARIS12))
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
        }
 
        return 0;
index 54fd0125d9cf799b850c3a4904c09c71bb5ba22b..f4dab979a3a120dd87cea6a9e9e59a03b4e06f6e 100644 (file)
@@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
 
 extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
 extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+
+extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count);
+
 #endif /* _HARDWARE_MANAGER_H_ */
 
index fb0f96f7cdbc703a97993a2953a60dbd8d9bc315..0d298a0409f578b6942a9594362890f381e6f8e2 100644 (file)
@@ -309,7 +309,7 @@ struct pp_hwmgr_func {
        int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
        int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
        int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
-       int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+       int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
        int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
        int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
                                        uint32_t virtual_addr_low,
@@ -332,6 +332,8 @@ struct pp_hwmgr_func {
        int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
        int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
        int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+       int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+       int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
 };
 
 struct pp_table_func {
index 62f36ba2435be4770e9a14f9dd8a779a2802fa79..d11d6a797ce4807668e2fff0cafcb6bcbdf35061 100644 (file)
@@ -395,6 +395,9 @@ typedef uint16_t PPSMC_Result;
 
 #define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
 
+#define PPSMC_MSG_EnableFFC                   ((uint16_t) 0x307)
+#define PPSMC_MSG_DisableFFC                  ((uint16_t) 0x308)
+
 #define PPSMC_MSG_EnableDpmDidt               ((uint16_t) 0x309)
 #define PPSMC_MSG_DisableDpmDidt              ((uint16_t) 0x30A)
 
index 2b2c266169023d44e52bd92dd09ab07e579fe7ac..b3e06e49883408097b35ba5493b1bec2092f9977 100644 (file)
@@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
        efuse = efuse >> 24;
 
        if (hwmgr->chip_id == CHIP_POLARIS10) {
-               min = 1000;
-               max = 2300;
+               if (hwmgr->is_kicker) {
+                       min = 1200;
+                       max = 2500;
+               } else {
+                       min = 1000;
+                       max = 2300;
+               }
+       } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+               if (hwmgr->is_kicker) {
+                       min = 900;
+                       max = 2100;
+               } else {
+                       min = 1100;
+                       max = 2100;
+               }
        } else {
                min = 1100;
                max = 2100;
@@ -1626,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+       struct amdgpu_device *adev = hwmgr->adev;
 
        SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
        int result = 0;
@@ -1645,6 +1659,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
 
        result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
 
+       if (0 == result) {
+               if (((adev->pdev->device == 0x67ef) &&
+                    ((adev->pdev->revision == 0xe0) ||
+                     (adev->pdev->revision == 0xe5))) ||
+                   ((adev->pdev->device == 0x67ff) &&
+                    ((adev->pdev->revision == 0xcf) ||
+                     (adev->pdev->revision == 0xef) ||
+                     (adev->pdev->revision == 0xff)))) {
+                       avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+                       if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
+                           (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
+                               if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
+                                   (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
+                                   (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
+                                   (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
+                                   (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
+                                   (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
+                                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF718F1D4;
+                                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x323FD;
+                                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0x1E455;
+                                       avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+                                       avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
+                                       avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x23;
+                               }
+                       }
+               } else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
+                       avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF6B024DD;
+                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x3005E;
+                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0x18A5F;
+                       avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
+                       avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
+                       avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x3B;
+               } else if (((adev->pdev->device == 0x67df) &&
+                           ((adev->pdev->revision == 0xe0) ||
+                            (adev->pdev->revision == 0xe3) ||
+                            (adev->pdev->revision == 0xe4) ||
+                            (adev->pdev->revision == 0xe5) ||
+                            (adev->pdev->revision == 0xe7) ||
+                            (adev->pdev->revision == 0xef))) ||
+                          ((adev->pdev->device == 0x6fdf) &&
+                           ((adev->pdev->revision == 0xef) ||
+                            (adev->pdev->revision == 0xff)))) {
+                       avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0   = 0xF843B66B;
+                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1   = 0x59CB5;
+                       avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2   = 0xFFFF287F;
+                       avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+                       avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
+                       avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b  = 0x58;
+               }
+       }
+
        if (0 == result) {
                table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
                table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
index 05c8e7267165acd425aef7bdb73cbd660c991c82..d2a1c7372f362c686993b74ce1200010962a9858 100644 (file)
@@ -250,7 +250,7 @@ EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
  * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
  * @iter: The iterator to initialize.
  * @old_state: Old plane state for validation.
- * @new_state: Plane state from which to iterate the damage clips.
+ * @state: Plane state from which to iterate the damage clips.
  *
  * Initialize an iterator, which clips plane damage
  * &drm_plane_state.fb_damage_clips to plane &drm_plane_state.src. This iterator
index 7fea74861a87fdff9a30c3a313a06360093aba80..160ce3c060a5666b40c542372436f1ebb0ff4b5e 100644 (file)
@@ -439,6 +439,4 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 
        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
-
-       gpu->lastctx = cmdbuf->ctx;
 }
index 52802e6049e0adab5045b57cc09bd265708ae34d..96efc84396bf73e7abe6dfe3cac90384099f9db2 100644 (file)
@@ -72,14 +72,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];
 
-               if (gpu) {
-                       mutex_lock(&gpu->lock);
-                       if (gpu->lastctx == ctx)
-                               gpu->lastctx = NULL;
-                       mutex_unlock(&gpu->lock);
-
+               if (gpu)
                        drm_sched_entity_destroy(&ctx->sched_entity[i]);
-               }
        }
 
        kfree(ctx);
@@ -523,7 +517,7 @@ static int etnaviv_bind(struct device *dev)
        if (!priv) {
                dev_err(dev, "failed to allocate private data\n");
                ret = -ENOMEM;
-               goto out_unref;
+               goto out_put;
        }
        drm->dev_private = priv;
 
@@ -549,7 +543,7 @@ out_register:
        component_unbind_all(dev, drm);
 out_bind:
        kfree(priv);
-out_unref:
+out_put:
        drm_dev_put(drm);
 
        return ret;
index 8d02d1b7dcf5a54b5bc1623b847f73dad1b493d0..4bf698de599696f0462ee50af4f7b5ff59fc199b 100644 (file)
@@ -107,17 +107,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
        return base + nelem * elem_size;
 }
 
-/* returns true if fence a comes after fence b */
-static inline bool fence_after(u32 a, u32 b)
-{
-       return (s32)(a - b) > 0;
-}
-
-static inline bool fence_after_eq(u32 a, u32 b)
-{
-       return (s32)(a - b) >= 0;
-}
-
 /*
  * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
  * We need to calculate the timeout in terms of number of jiffies
index 9146e30e24a6de20d8bda5f80c7ea7470ddca334..3fbb4855396cc98505eeb2062de6579c01478d57 100644 (file)
@@ -118,6 +118,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
        unsigned int n_obj, n_bomap_pages;
        size_t file_size, mmu_size;
        __le64 *bomap, *bomap_start;
+       unsigned long flags;
 
        /* Only catch the first event, or when manually re-armed */
        if (!etnaviv_dump_core)
@@ -134,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
                    mmu_size + gpu->buffer.size;
 
        /* Add in the active command buffers */
-       spin_lock(&gpu->sched.job_list_lock);
+       spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
        list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
                submit = to_etnaviv_submit(s_job);
                file_size += submit->cmdbuf.size;
                n_obj++;
        }
-       spin_unlock(&gpu->sched.job_list_lock);
+       spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
 
        /* Add in the active buffer objects */
        list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -182,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
                              gpu->buffer.size,
                              etnaviv_cmdbuf_get_va(&gpu->buffer));
 
-       spin_lock(&gpu->sched.job_list_lock);
+       spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
        list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
                submit = to_etnaviv_submit(s_job);
                etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
                                      submit->cmdbuf.vaddr, submit->cmdbuf.size,
                                      etnaviv_cmdbuf_get_va(&submit->cmdbuf));
        }
-       spin_unlock(&gpu->sched.job_list_lock);
+       spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
 
        /* Reserve space for the bomap */
        if (n_bomap_pages) {
index f225fbc6edd2d94c7b82a37b56c401a66966fd95..6904535475de1828efc6a273612e4da7ad3284b6 100644 (file)
@@ -3,10 +3,12 @@
  * Copyright (C) 2015-2018 Etnaviv Project
  */
 
+#include <linux/clk.h>
 #include <linux/component.h>
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
 #include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
@@ -976,7 +978,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 
 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 {
-       unsigned long flags;
        unsigned int i = 0;
 
        dev_err(gpu->dev, "recover hung GPU!\n");
@@ -989,15 +990,13 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
        etnaviv_hw_reset(gpu);
 
        /* complete all events, the GPU won't do it after the reset */
-       spin_lock_irqsave(&gpu->event_spinlock, flags);
+       spin_lock(&gpu->event_spinlock);
        for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
                complete(&gpu->event_free);
        bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
-       spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-       gpu->completed_fence = gpu->active_fence;
+       spin_unlock(&gpu->event_spinlock);
 
        etnaviv_gpu_hw_init(gpu);
-       gpu->lastctx = NULL;
        gpu->exec_state = -1;
 
        mutex_unlock(&gpu->lock);
@@ -1032,7 +1031,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence)
 {
        struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
-       return fence_completed(f->gpu, f->base.seqno);
+       return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
 }
 
 static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1071,6 +1070,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
        return &f->base;
 }
 
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+       return (s32)(a - b) > 0;
+}
+
 /*
  * event management:
  */
@@ -1078,7 +1083,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
        unsigned int *events)
 {
-       unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+       unsigned long timeout = msecs_to_jiffies(10 * 10000);
        unsigned i, acquired = 0;
 
        for (i = 0; i < nr_events; i++) {
@@ -1095,7 +1100,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
                timeout = ret;
        }
 
-       spin_lock_irqsave(&gpu->event_spinlock, flags);
+       spin_lock(&gpu->event_spinlock);
 
        for (i = 0; i < nr_events; i++) {
                int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
@@ -1105,7 +1110,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
                set_bit(event, gpu->event_bitmap);
        }
 
-       spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+       spin_unlock(&gpu->event_spinlock);
 
        return 0;
 
@@ -1118,18 +1123,11 @@ out:
 
 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpu->event_spinlock, flags);
-
        if (!test_bit(event, gpu->event_bitmap)) {
                dev_warn(gpu->dev, "event %u is already marked as free",
                         event);
-               spin_unlock_irqrestore(&gpu->event_spinlock, flags);
        } else {
                clear_bit(event, gpu->event_bitmap);
-               spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-
                complete(&gpu->event_free);
        }
 }
@@ -1306,8 +1304,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
                goto out_unlock;
        }
 
-       gpu->active_fence = gpu_fence->seqno;
-
        if (submit->nr_pmrs) {
                gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
                kref_get(&submit->refcount);
@@ -1549,7 +1545,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
        etnaviv_gpu_update_clock(gpu);
        etnaviv_gpu_hw_init(gpu);
 
-       gpu->lastctx = NULL;
        gpu->exec_state = -1;
 
        mutex_unlock(&gpu->lock);
@@ -1806,8 +1801,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
        struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
        u32 idle, mask;
 
-       /* If we have outstanding fences, we're not idle */
-       if (gpu->completed_fence != gpu->active_fence)
+       /* If there are any jobs in the HW queue, we're not idle */
+       if (atomic_read(&gpu->sched.hw_rq_count))
                return -EBUSY;
 
        /* Check whether the hardware (except FE) is idle */
index 9a75a6937268eebff62e04a13b3669fb3d5934b1..9bcf151f706bb7d1da8d518a92d2b446134fbad9 100644 (file)
@@ -6,9 +6,6 @@
 #ifndef __ETNAVIV_GPU_H__
 #define __ETNAVIV_GPU_H__
 
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 
@@ -88,6 +85,8 @@ struct etnaviv_event {
 
 struct etnaviv_cmdbuf_suballoc;
 struct etnaviv_cmdbuf;
+struct regulator;
+struct clk;
 
 #define ETNA_NR_EVENTS 30
 
@@ -98,7 +97,6 @@ struct etnaviv_gpu {
        struct mutex lock;
        struct etnaviv_chip_identity identity;
        enum etnaviv_sec_mode sec_mode;
-       struct etnaviv_file_private *lastctx;
        struct workqueue_struct *wq;
        struct drm_gpu_scheduler sched;
 
@@ -121,7 +119,6 @@ struct etnaviv_gpu {
        struct mutex fence_lock;
        struct idr fence_idr;
        u32 next_fence;
-       u32 active_fence;
        u32 completed_fence;
        wait_queue_head_t fence_event;
        u64 fence_context;
@@ -161,11 +158,6 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
        return readl(gpu->mmio + reg);
 }
 
-static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
-{
-       return fence_after_eq(gpu->completed_fence, fence);
-}
-
 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
index e3d6a8584715a7966b2d87bfa546bdddf0f90605..786a8ee6f10fbc483d40ad59fbc7877767a8fb7f 100644 (file)
@@ -228,6 +228,21 @@ static const uint32_t fimd_formats[] = {
        DRM_FORMAT_ARGB8888,
 };
 
+static const unsigned int capabilities[WINDOWS_NR] = {
+       0,
+       EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+       EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+       EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+       EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
+static inline void fimd_set_bits(struct fimd_context *ctx, u32 reg, u32 mask,
+                                u32 val)
+{
+       val = (val & mask) | (readl(ctx->regs + reg) & ~mask);
+       writel(val, ctx->regs + reg);
+}
+
 static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
@@ -551,13 +566,88 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
        writel(val, ctx->regs + VIDCON0);
 }
 
+static void fimd_win_set_bldeq(struct fimd_context *ctx, unsigned int win,
+                              unsigned int alpha, unsigned int pixel_alpha)
+{
+       u32 mask = BLENDEQ_A_FUNC_F(0xf) | BLENDEQ_B_FUNC_F(0xf);
+       u32 val = 0;
+
+       switch (pixel_alpha) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+       case DRM_MODE_BLEND_COVERAGE:
+               val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A);
+               val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+               break;
+       case DRM_MODE_BLEND_PREMULTI:
+       default:
+               if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+                       val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA0);
+                       val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+               } else {
+                       val |= BLENDEQ_A_FUNC_F(BLENDEQ_ONE);
+                       val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+               }
+               break;
+       }
+       fimd_set_bits(ctx, BLENDEQx(win), mask, val);
+}
 
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
-                               uint32_t pixel_format, int width)
+static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+                               unsigned int alpha, unsigned int pixel_alpha)
 {
-       unsigned long val;
+       u32 win_alpha_l = (alpha >> 8) & 0xf;
+       u32 win_alpha_h = alpha >> 12;
+       u32 val = 0;
 
-       val = WINCONx_ENWIN;
+       switch (pixel_alpha) {
+       case DRM_MODE_BLEND_PIXEL_NONE:
+               break;
+       case DRM_MODE_BLEND_COVERAGE:
+       case DRM_MODE_BLEND_PREMULTI:
+       default:
+               val |= WINCON1_ALPHA_SEL;
+               val |= WINCON1_BLD_PIX;
+               val |= WINCON1_ALPHA_MUL;
+               break;
+       }
+       fimd_set_bits(ctx, WINCON(win), WINCONx_BLEND_MODE_MASK, val);
+
+       /* OSD alpha */
+       val = VIDISD14C_ALPHA0_R(win_alpha_h) |
+               VIDISD14C_ALPHA0_G(win_alpha_h) |
+               VIDISD14C_ALPHA0_B(win_alpha_h) |
+               VIDISD14C_ALPHA1_R(0x0) |
+               VIDISD14C_ALPHA1_G(0x0) |
+               VIDISD14C_ALPHA1_B(0x0);
+       writel(val, ctx->regs + VIDOSD_C(win));
+
+       val = VIDW_ALPHA_R(win_alpha_l) | VIDW_ALPHA_G(win_alpha_l) |
+               VIDW_ALPHA_B(win_alpha_l);
+       writel(val, ctx->regs + VIDWnALPHA0(win));
+
+       val = VIDW_ALPHA_R(0x0) | VIDW_ALPHA_G(0x0) |
+               VIDW_ALPHA_B(0x0);
+       writel(val, ctx->regs + VIDWnALPHA1(win));
+
+       fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK,
+                       BLENDCON_NEW_8BIT_ALPHA_VALUE);
+}
+
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+                               struct drm_framebuffer *fb, int width)
+{
+       struct exynos_drm_plane plane = ctx->planes[win];
+       struct exynos_drm_plane_state *state =
+               to_exynos_plane_state(plane.base.state);
+       uint32_t pixel_format = fb->format->format;
+       unsigned int alpha = state->base.alpha;
+       u32 val = WINCONx_ENWIN;
+       unsigned int pixel_alpha;
+
+       if (fb->format->has_alpha)
+               pixel_alpha = state->base.pixel_blend_mode;
+       else
+               pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
 
        /*
         * In case of s3c64xx, window 0 doesn't support alpha channel.
@@ -591,8 +681,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
                break;
        case DRM_FORMAT_ARGB8888:
        default:
-               val |= WINCON1_BPPMODE_25BPP_A1888
-                       | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+               val |= WINCON1_BPPMODE_25BPP_A1888;
                val |= WINCONx_WSWP;
                val |= WINCONx_BURSTLEN_16WORD;
                break;
@@ -610,25 +699,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
                val &= ~WINCONx_BURSTLEN_MASK;
                val |= WINCONx_BURSTLEN_4WORD;
        }
-
-       writel(val, ctx->regs + WINCON(win));
+       fimd_set_bits(ctx, WINCON(win), ~WINCONx_BLEND_MODE_MASK, val);
 
        /* hardware window 0 doesn't support alpha channel. */
        if (win != 0) {
-               /* OSD alpha */
-               val = VIDISD14C_ALPHA0_R(0xf) |
-                       VIDISD14C_ALPHA0_G(0xf) |
-                       VIDISD14C_ALPHA0_B(0xf) |
-                       VIDISD14C_ALPHA1_R(0xf) |
-                       VIDISD14C_ALPHA1_G(0xf) |
-                       VIDISD14C_ALPHA1_B(0xf);
-
-               writel(val, ctx->regs + VIDOSD_C(win));
-
-               val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
-                       VIDW_ALPHA_G(0xf);
-               writel(val, ctx->regs + VIDWnALPHA0(win));
-               writel(val, ctx->regs + VIDWnALPHA1(win));
+               fimd_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+               fimd_win_set_bldeq(ctx, win, alpha, pixel_alpha);
        }
 }
 
@@ -785,7 +861,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
                DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
        }
 
-       fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w);
+       fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
 
        /* hardware window 0 doesn't support color key. */
        if (win != 0)
@@ -987,6 +1063,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
                ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
                ctx->configs[i].zpos = i;
                ctx->configs[i].type = fimd_win_types[i];
+               ctx->configs[i].capabilities = capabilities[i];
                ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
                                        &ctx->configs[i]);
                if (ret)
index 843a9d40c05e33f5f13d1cbb7b09c751127940f2..cf549f1ed4032f4394374155049dc68effd8089c 100644 (file)
@@ -2,7 +2,7 @@
 config DRM_MSM
        tristate "MSM DRM"
        depends on DRM
-       depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+       depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
        depends on OF && COMMON_CLK
        depends on MMU
        select QCOM_MDT_LOADER if ARCH_QCOM
@@ -11,7 +11,7 @@ config DRM_MSM
        select DRM_PANEL
        select SHMEM
        select TMPFS
-       select QCOM_SCM
+       select QCOM_SCM if ARCH_QCOM
        select WANT_DEV_COREDUMP
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
index 19ab521d4c3ad10cc368b8227b44b7bad5b328f6..56a70c74af4ed2e275da7255ed66907519bd5e6a 100644 (file)
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 msm-y := \
        adreno/adreno_device.o \
        adreno/adreno_gpu.o \
+       adreno/a2xx_gpu.o \
        adreno/a3xx_gpu.o \
        adreno/a4xx_gpu.o \
        adreno/a5xx_gpu.o \
@@ -14,6 +15,7 @@ msm-y := \
        adreno/a6xx_gpu.o \
        adreno/a6xx_gmu.o \
        adreno/a6xx_hfi.o \
+       adreno/a6xx_gpu_state.o \
        hdmi/hdmi.o \
        hdmi/hdmi_audio.o \
        hdmi/hdmi_bridge.o \
@@ -68,11 +70,9 @@ msm-y := \
        disp/dpu1/dpu_hw_util.o \
        disp/dpu1/dpu_hw_vbif.o \
        disp/dpu1/dpu_io_util.o \
-       disp/dpu1/dpu_irq.o \
        disp/dpu1/dpu_kms.o \
        disp/dpu1/dpu_mdss.o \
        disp/dpu1/dpu_plane.o \
-       disp/dpu1/dpu_power_handle.o \
        disp/dpu1/dpu_rm.o \
        disp/dpu1/dpu_vbif.o \
        msm_atomic.o \
@@ -90,10 +90,11 @@ msm-y := \
        msm_perf.o \
        msm_rd.o \
        msm_ringbuffer.o \
-       msm_submitqueue.o
+       msm_submitqueue.o \
+       msm_gpu_tracepoints.o \
+       msm_gpummu.o
 
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
-                         disp/dpu1/dpu_dbg.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
index 12b0ba270b5ebe3b2930603b23f28819f97f4b25..14eb52f3e605223f278bfa182c7cddee8aa868ec 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -239,7 +239,63 @@ enum sq_tex_swiz {
 enum sq_tex_filter {
        SQ_TEX_FILTER_POINT = 0,
        SQ_TEX_FILTER_BILINEAR = 1,
-       SQ_TEX_FILTER_BICUBIC = 2,
+       SQ_TEX_FILTER_BASEMAP = 2,
+       SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+       SQ_TEX_ANISO_FILTER_DISABLED = 0,
+       SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+       SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+       SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+       SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+       SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+       SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+       SQ_TEX_DIMENSION_1D = 0,
+       SQ_TEX_DIMENSION_2D = 1,
+       SQ_TEX_DIMENSION_3D = 2,
+       SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+       SQ_TEX_BORDER_COLOR_BLACK = 0,
+       SQ_TEX_BORDER_COLOR_WHITE = 1,
+       SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+       SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+       SQ_TEX_SIGN_UNISIGNED = 0,
+       SQ_TEX_SIGN_SIGNED = 1,
+       SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+       SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+       SQ_TEX_ENDIAN_NONE = 0,
+       SQ_TEX_ENDIAN_8IN16 = 1,
+       SQ_TEX_ENDIAN_8IN32 = 2,
+       SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+       SQ_TEX_CLAMP_POLICY_D3D = 0,
+       SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+       SQ_TEX_NUM_FORMAT_FRAC = 0,
+       SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+       SQ_TEX_TYPE_0 = 0,
+       SQ_TEX_TYPE_1 = 1,
+       SQ_TEX_TYPE_2 = 2,
+       SQ_TEX_TYPE_3 = 3,
 };
 
 #define REG_A2XX_RBBM_PATCH_RELEASE                            0x00000001
@@ -323,6 +379,18 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
 }
 
 #define REG_A2XX_MH_MMU_VA_RANGE                               0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK            0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT           0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+       return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK                     0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT                    12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+       return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
 
 #define REG_A2XX_MH_MMU_PT_BASE                                        0x00000042
 
@@ -331,6 +399,8 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
 #define REG_A2XX_MH_MMU_TRAN_ERROR                             0x00000044
 
 #define REG_A2XX_MH_MMU_INVALIDATE                             0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL                  0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC                   0x00000002
 
 #define REG_A2XX_MH_MMU_MPU_BASE                               0x00000046
 
@@ -389,12 +459,19 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
 #define REG_A2XX_RBBM_READ_ERROR                               0x000003b3
 
 #define REG_A2XX_RBBM_INT_CNTL                                 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK                      0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK             0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK                   0x00080000
 
 #define REG_A2XX_RBBM_INT_STATUS                               0x000003b5
 
 #define REG_A2XX_RBBM_INT_ACK                                  0x000003b6
 
 #define REG_A2XX_MASTER_INT_SIGNAL                             0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT                     0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT                     0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT                     0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT                   0x80000000
 
 #define REG_A2XX_RBBM_PERIPHID1                                        0x000003f9
 
@@ -467,6 +544,19 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
 #define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE                  0x02000000
 #define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE                  0x04000000
 
+#define REG_A2XX_MH_INTERRUPT_MASK                             0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR                  0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR                 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT                  0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS                           0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR                            0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1                     0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2                     0x00000a55
+
 #define REG_A2XX_A220_VSC_BIN_SIZE                             0x00000c01
 #define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK                     0x0000001f
 #define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT                    0
@@ -648,6 +738,18 @@ static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val
 #define REG_A2XX_RB_DEBUG_DATA                                 0x00000f27
 
 #define REG_A2XX_RB_SURFACE_INFO                               0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK               0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT              0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+       return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK                        0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT               14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+       return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
 
 #define REG_A2XX_RB_COLOR_INFO                                 0x00002001
 #define A2XX_RB_COLOR_INFO_FORMAT__MASK                                0x0000000f
@@ -679,7 +781,7 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
 #define A2XX_RB_COLOR_INFO_BASE__SHIFT                         12
 static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
 {
-       return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+       return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
 }
 
 #define REG_A2XX_RB_DEPTH_INFO                                 0x00002002
@@ -693,7 +795,7 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
 #define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT                   12
 static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
 {
-       return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+       return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
 }
 
 #define REG_A2XX_A225_RB_COLOR_INFO3                           0x00002005
@@ -1757,6 +1859,36 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
 #define REG_A2XX_COHER_STATUS_PM4                              0x00000a2b
 
 #define REG_A2XX_SQ_TEX_0                                      0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK                               0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT                              0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+       return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK                             0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT                            2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+       return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK                             0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT                            4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+       return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK                             0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT                            6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+       return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK                             0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT                            8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+       return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
 #define A2XX_SQ_TEX_0_CLAMP_X__MASK                            0x00001c00
 #define A2XX_SQ_TEX_0_CLAMP_X__SHIFT                           10
 static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
 {
        return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
 }
-#define A2XX_SQ_TEX_0_PITCH__MASK                              0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__MASK                              0x7fc00000
 #define A2XX_SQ_TEX_0_PITCH__SHIFT                             22
 static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
 {
        return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
 }
+#define A2XX_SQ_TEX_0_TILED                                    0x00000002
 
 #define REG_A2XX_SQ_TEX_1                                      0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK                             0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT                            0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+       return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK                         0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT                                6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+       return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK                       0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT                      8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED                                  0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK                       0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT                      11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+       return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK                       0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT                      12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+       return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
 
 #define REG_A2XX_SQ_TEX_2                                      0x00000002
 #define A2XX_SQ_TEX_2_WIDTH__MASK                              0x00001fff
@@ -1797,8 +1961,20 @@ static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
 {
        return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
 }
+#define A2XX_SQ_TEX_2_DEPTH__MASK                              0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT                             26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
 
 #define REG_A2XX_SQ_TEX_3                                      0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK                         0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT                                0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+       return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
 #define A2XX_SQ_TEX_3_SWIZ_X__MASK                             0x0000000e
 #define A2XX_SQ_TEX_3_SWIZ_X__SHIFT                            1
 static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
 {
        return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
 }
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK                         0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT                                13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
 #define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK                      0x00180000
 #define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT                     19
 static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@ static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
 {
        return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
 }
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK                         0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT                                23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+       return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK                       0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT                      25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+       return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK                                0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT                       31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4                                      0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK                     0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT                    0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+       return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK                     0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT                    1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+       return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK                      0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT                     2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK                      0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT                     6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK                           0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK                           0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK                           0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT                          12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+       return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK                  0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT                 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK                  0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT                 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5                                      0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK                       0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT                      0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+       return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX                            0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK                          0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT                         3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+       return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK                         0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT                                5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+       return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK                          0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT                         9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+       return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS                              0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK                                0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT                       12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+       return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
 
 
 #endif /* A2XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644 (file)
index 0000000..1f83bc1
--- /dev/null
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+/*
+ * Initialize the CP microengine by emitting a CP_ME_INIT packet (18 payload
+ * dwords) on ring 0, then switch the CP into protected mode.
+ *
+ * Returns true once the ring has drained and the GPU reports idle, false on
+ * idle timeout (see a2xx_idle()).
+ */
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+       struct msm_ringbuffer *ring = gpu->rb[0];
+
+       OUT_PKT3(ring, CP_ME_INIT, 18);
+
+       /* All fields present (bits 9:0) */
+       OUT_RING(ring, 0x000003ff);
+       /* Disable/Enable Real-Time Stream processing (present but ignored) */
+       OUT_RING(ring, 0x00000000);
+       /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+       OUT_RING(ring, 0x00000000);
+
+       /* Register base offsets the CP uses for draw-state writes; all are
+        * expressed relative to the 0x2000 context-register window. */
+       OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+       OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+       OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+       OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+       OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+       OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+       OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+       OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+       /* Vertex and Pixel Shader Start Addresses in instructions
+        * (3 DWORDS per instruction) */
+       OUT_RING(ring, 0x80000180);
+       /* Maximum Contexts */
+       OUT_RING(ring, 0x00000001);
+       /* Write Confirm Interval and The CP will wait the
+        * wait_interval * 16 clocks between polling  */
+       OUT_RING(ring, 0x00000000);
+       /* NQ and External Memory Swap */
+       OUT_RING(ring, 0x00000000);
+       /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+       OUT_RING(ring, 0x200001f2);
+       /* Disable header dumping and Header dump address */
+       OUT_RING(ring, 0x00000000);
+       /* Header dump size */
+       OUT_RING(ring, 0x00000000);
+
+       /* enable protected mode */
+       OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+       OUT_RING(ring, 1);
+
+       gpu->funcs->flush(gpu, ring);
+       return a2xx_idle(gpu);
+}
+
+/*
+ * Bring the a2xx GPU from reset to a runnable state:
+ *   soft reset -> MMU/MPU setup -> memory-hub arbiter -> interrupt masks ->
+ *   gmem (EDRAM) size -> common adreno init -> PM4/PFP ucode upload ->
+ *   release the microengine and run a2xx_me_init().
+ *
+ * The register-write ordering below is deliberate bring-up sequencing; do
+ * not reorder without hardware documentation in hand.
+ *
+ * Returns 0 on success or a negative errno (-EINVAL if the microengine
+ * fails to initialize/idle).
+ */
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       dma_addr_t pt_base, tran_error;
+       uint32_t *ptr, len;
+       int i, ret;
+
+       /* Fetch the gpummu page-table base and translation-error addresses;
+        * both are programmed into the MH MMU registers further down. */
+       msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+       DBG("%s", gpu->name);
+
+       /* halt ME to avoid ucode upload issues on a20x */
+       gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+       gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+       gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+       /* note: kgsl uses 0x00000001 after first reset on a22x */
+       gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+       msleep(30);
+       gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+       if (adreno_is_a225(adreno_gpu))
+               gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+       /* note: kgsl uses 0x0000ffff for a20x */
+       gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+       /* MPU: physical range */
+       gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+       gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+       /* Enable the MMU with translate-in-range behavior for every client. */
+       gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+               A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+               A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+       /* same as parameters in adreno_gpu */
+       gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+               A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+       gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+       gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+       /* Flush any stale translations before first use. */
+       gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+       gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+               A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+               A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+               A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+               A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+               A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+       /* NOTE(review): register 0x0a54 exists only on a22x+, hence the
+        * a20x exclusion — confirm against downstream kgsl. */
+       if (!adreno_is_a20x(adreno_gpu))
+               gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+       gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+       gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+       gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+       gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+       /* note: gsl doesn't set this */
+       gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+       /* Only read-error IRQs from RBBM; CP error + IB1/RB completion IRQs;
+        * no SQ IRQs; all three MH error sources (handled in a2xx_irq()). */
+       gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+               A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+       gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+               AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+               AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+               AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+               AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+               AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+               AXXX_CP_INT_CNTL_IB1_INT_MASK |
+               AXXX_CP_INT_CNTL_RB_INT_MASK);
+       gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+       gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+               A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+               A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+               A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+       /* Encode gmem size: i = 3/4/5 for 128K/256K/512K (SZ_16K << i).
+        * Assumes gmem is one of those sizes; if none matches, i falls
+        * through as 6 — TODO confirm that is intentional. */
+       for (i = 3; i <= 5; i++)
+               if ((SZ_16K << i) == adreno_gpu->gmem)
+                       break;
+       gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+       ret = adreno_hw_init(gpu);
+       if (ret)
+               return ret;
+
+       /* NOTE: PM4/micro-engine firmware registers look to be the same
+        * for a2xx and a3xx.. we could possibly push that part down to
+        * adreno_gpu base class.  Or push both PM4 and PFP but
+        * parameterize the pfp ucode addr/data registers..
+        */
+
+       /* Load PM4: */
+       ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+       len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+       DBG("loading PM4 ucode version: %x", ptr[1]);
+
+       gpu_write(gpu, REG_AXXX_CP_DEBUG,
+                       AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+       gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+       /* word 0 of the firmware image is intentionally not written */
+       for (i = 1; i < len; i++)
+               gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+       /* Load PFP: */
+       ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+       len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+       DBG("loading PFP ucode version: %x", ptr[5]);
+
+       gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+       /* word 0 skipped here as well, matching the PM4 upload above */
+       for (i = 1; i < len; i++)
+               gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+       gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+       /* clear ME_HALT to start micro engine */
+       gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+       return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+/*
+ * Recover a hung GPU: log the CP scratch registers (and a full register
+ * dump if hang_debug is set), pulse RBBM_SOFT_RESET, then hand off to the
+ * common adreno recovery path, which re-runs hw_init.
+ */
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+       int i;
+
+       adreno_dump_info(gpu);
+
+       /* CP scratch registers 0..7 often carry breadcrumbs useful for
+        * post-mortem debugging. */
+       for (i = 0; i < 8; i++) {
+               printk("CP_SCRATCH_REG%d: %u\n", i,
+                       gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+       }
+
+       /* dump registers before resetting gpu, if enabled: */
+       if (hang_debug)
+               a2xx_dump(gpu);
+
+       gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+       /* readback presumably posts the reset write before deasserting —
+        * NOTE(review): confirm against the downstream driver */
+       gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+       gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+       adreno_recover(gpu);
+}
+
+/*
+ * Tear down the a2xx GPU instance: run the common adreno cleanup, then
+ * free the a2xx_gpu container allocated at probe time.
+ */
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+       DBG("%s", gpu->name);
+
+       adreno_gpu_cleanup(adreno_gpu);
+
+       kfree(a2xx_gpu);
+}
+
+/*
+ * Wait for the GPU to become fully idle: first drain ring 0, then spin
+ * until RBBM_STATUS no longer reports GUI_ACTIVE.
+ *
+ * Returns true when idle, false on timeout (logged via DRM_ERROR).
+ */
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+       /* wait for ringbuffer to drain: */
+       if (!adreno_idle(gpu, gpu->rb[0]))
+               return false;
+
+       /* then wait for GPU to finish: */
+       if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+                       A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+               DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+               /* TODO maybe we need to reset GPU here to recover from hang? */
+               return false;
+       }
+
+       return true;
+}
+
+/*
+ * Top-level IRQ handler. MASTER_INT_SIGNAL multiplexes the per-unit
+ * interrupt sources; each active source (MH, CP, RBBM) is read, logged
+ * where unexpected, and acked/cleared. Retire is kicked unconditionally
+ * since RB_INT (fence completion) is the common case.
+ */
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+       uint32_t mstatus, status;
+
+       mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+       /* Memory hub: AXI read/write errors and MMU page faults — all of
+        * these are error conditions (see mask setup in a2xx_hw_init). */
+       if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+               status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+               dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+               dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+                       gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+               gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+       }
+
+       if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+               status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+               /* only RB_INT is expected */
+               if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+                       dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+               gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+       }
+
+       if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+               status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+               dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+               gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+       }
+
+       msm_gpu_retire(gpu);
+
+       return IRQ_HANDLED;
+}
+
+/* a200 register ranges as (start, end) offset pairs, ~0-terminated.
+ * NOTE(review): presumably consumed by the common adreno register
+ * dump/snapshot code — verify against adreno_gpu. */
+static const unsigned int a200_registers[] = {
+       0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+       0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+       0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+       0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+       0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+       0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+       0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+       0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+       0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+       0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+       0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+       0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+       0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+       0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+       0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+       0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+       0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+       0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+       0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+       0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+       0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+       0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+       0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+       0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+       0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+       ~0   /* sentinel */
+};
+
+/* a220 register ranges as (start, end) offset pairs, ~0-terminated;
+ * same format as a200_registers. */
+static const unsigned int a220_registers[] = {
+       0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+       0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+       0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+       0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+       0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+       0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+       0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+       0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+       0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+       0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+       0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+       0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+       0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+       0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+       0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+       0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+       0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+       0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+       0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+       0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+       0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+       0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+       0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+       0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+       0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+       0x4900, 0x4900, 0x4908, 0x4908,
+       ~0   /* sentinel */
+};
+
+/* a225 register ranges as (start, end) offset pairs, ~0-terminated;
+ * same format as a200_registers. */
+static const unsigned int a225_registers[] = {
+       0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+       0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+       0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+       0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+       0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+       0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+       0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+       0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+       0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+       0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+       0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+       0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+       0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+       0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+       0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+       0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+       0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+       0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+       0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+       0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+       0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+       0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+       0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+       0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+       0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+       0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+       0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+       0x4908, 0x4908,
+       ~0   /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+       printk("status:   %08x\n",
+                       gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+       adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+       struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       adreno_gpu_state_get(gpu, state);
+
+       state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+       return state;
+}
+
+/* Register offset defines for A2XX - copy of A3XX */
+static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+};
+
+static const struct adreno_gpu_funcs funcs = {
+       .base = {
+               .get_param = adreno_get_param,
+               .hw_init = a2xx_hw_init,
+               .pm_suspend = msm_gpu_pm_suspend,
+               .pm_resume = msm_gpu_pm_resume,
+               .recover = a2xx_recover,
+               .submit = adreno_submit,
+               .flush = adreno_flush,
+               .active_ring = adreno_active_ring,
+               .irq = a2xx_irq,
+               .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+               .show = adreno_show,
+#endif
+               .gpu_state_get = a2xx_gpu_state_get,
+               .gpu_state_put = adreno_gpu_state_put,
+       },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+       struct a2xx_gpu *a2xx_gpu = NULL;
+       struct adreno_gpu *adreno_gpu;
+       struct msm_gpu *gpu;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev = priv->gpu_pdev;
+       int ret;
+
+       if (!pdev) {
+               dev_err(dev->dev, "no a2xx device\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+       if (!a2xx_gpu) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       adreno_gpu = &a2xx_gpu->base;
+       gpu = &adreno_gpu->base;
+
+       gpu->perfcntrs = perfcntrs;
+       gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+       if (adreno_is_a20x(adreno_gpu))
+               adreno_gpu->registers = a200_registers;
+       else if (adreno_is_a225(adreno_gpu))
+               adreno_gpu->registers = a225_registers;
+       else
+               adreno_gpu->registers = a220_registers;
+
+       adreno_gpu->reg_offsets = a2xx_register_offsets;
+
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+       if (ret)
+               goto fail;
+
+       if (!gpu->aspace) {
+               dev_err(dev->dev, "No memory protection without MMU\n");
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       return gpu;
+
+fail:
+       if (a2xx_gpu)
+               a2xx_destroy(&a2xx_gpu->base.base);
+
+       return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644 (file)
index 0000000..02fba2c
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+       struct adreno_gpu base;
+       bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
index a89f7bb8b5cc057fcf0da9ba85d37c4e1c04a0a0..17059f242a98e0ac42f66e041fc4bb48a813a884 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
index 669c2d4b070dea4c711e0e8df9585d68dd810cd9..c3b4bc6e4155e27393510f8ff0accbceabf4955d 100644 (file)
@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        int ret;
 
        if (!pdev) {
-               dev_err(dev->dev, "no a3xx device\n");
+               DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
                ret = -ENXIO;
                goto fail;
        }
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
                 * to not be possible to restrict access, then we must
                 * implement a cmdstream validator.
                 */
-               dev_err(dev->dev, "No memory protection without IOMMU\n");
+               DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
                ret = -ENXIO;
                goto fail;
        }
index 858690f528549abba1dca05145adf2f4d9fbcc90..9b51e25a9583ec72498f29b8b0542f3554f25e51 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
index 7c4e6dc1ed59961e0df2f14fcbc9805c5a2cc359..18f9a8e0bf3b535385e964f8c107ccf208f52d30 100644 (file)
@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
        int ret;
 
        if (!pdev) {
-               dev_err(dev->dev, "no a4xx device\n");
+               DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
                ret = -ENXIO;
                goto fail;
        }
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
                 * to not be possible to restrict access, then we must
                 * implement a cmdstream validator.
                 */
-               dev_err(dev->dev, "No memory protection without IOMMU\n");
+               DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
                ret = -ENXIO;
                goto fail;
        }
index b4944cc0e62f903b3d4fa653bc0064e54db722ee..cf4fe14ddd6ec05c158fa173f0dbafbbdec8b3a2 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
index d2127b1c4eced57a0fa2233550258c17d449a33b..d9af3aff690f1613f2ca4a65328683b6a8a845f2 100644 (file)
@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
        adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
 
        if (a5xx_gpu->pm4_bo) {
-               if (a5xx_gpu->pm4_iova)
-                       msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+               msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_put(a5xx_gpu->pm4_bo);
                a5xx_gpu->pm4_bo = NULL;
        }
 
        if (a5xx_gpu->pfp_bo) {
-               if (a5xx_gpu->pfp_iova)
-                       msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+               msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_put(a5xx_gpu->pfp_bo);
                a5xx_gpu->pfp_bo = NULL;
        }
@@ -173,7 +171,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
                        minor->debugfs_root, minor);
 
        if (ret) {
-               dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+               DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
                return ret;
        }
 
index 8edd80bb0428e624e156a7a9b73fef68155688ea..d5f5e56422f577f9fa6edd00fa1cd2be4575856f 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/soc/qcom/mdt_loader.h>
 #include <linux/pm_opp.h>
 #include <linux/nvmem-consumer.h>
-#include <linux/iopoll.h>
 #include <linux/slab.h>
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -511,13 +510,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
                a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
                        adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
 
+
                if (IS_ERR(a5xx_gpu->pm4_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pm4_bo);
                        a5xx_gpu->pm4_bo = NULL;
-                       dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+                       DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
                                ret);
                        return ret;
                }
+
+               msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
        }
 
        if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +529,12 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
                if (IS_ERR(a5xx_gpu->pfp_bo)) {
                        ret = PTR_ERR(a5xx_gpu->pfp_bo);
                        a5xx_gpu->pfp_bo = NULL;
-                       dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+                       DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
                                ret);
                        return ret;
                }
+
+               msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
        }
 
        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -841,20 +845,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
        a5xx_preempt_fini(gpu);
 
        if (a5xx_gpu->pm4_bo) {
-               if (a5xx_gpu->pm4_iova)
-                       msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+               msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
        }
 
        if (a5xx_gpu->pfp_bo) {
-               if (a5xx_gpu->pfp_iova)
-                       msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+               msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
        }
 
        if (a5xx_gpu->gpmu_bo) {
-               if (a5xx_gpu->gpmu_iova)
-                       msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+               msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
        }
 
@@ -1028,7 +1029,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
 
-       dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+       DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
                ring ? ring->id : -1, ring ? ring->seqno : 0,
                gpu_read(gpu, REG_A5XX_RBBM_STATUS),
                gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1135,7 @@ static const u32 a5xx_registers[] = {
 
 static void a5xx_dump(struct msm_gpu *gpu)
 {
-       dev_info(gpu->dev->dev, "status:   %08x\n",
+       DRM_DEV_INFO(gpu->dev->dev, "status:   %08x\n",
                gpu_read(gpu, REG_A5XX_RBBM_STATUS));
        adreno_dump(gpu);
 }
@@ -1211,10 +1212,6 @@ struct a5xx_gpu_state {
        u32 *hlsqregs;
 };
 
-#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
-       readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
-               interval, timeout)
-
 static int a5xx_crashdumper_init(struct msm_gpu *gpu,
                struct a5xx_crashdumper *dumper)
 {
@@ -1222,16 +1219,10 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
                SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
                &dumper->bo, &dumper->iova);
 
-       return PTR_ERR_OR_ZERO(dumper->ptr);
-}
-
-static void a5xx_crashdumper_free(struct msm_gpu *gpu,
-               struct a5xx_crashdumper *dumper)
-{
-       msm_gem_put_iova(dumper->bo, gpu->aspace);
-       msm_gem_put_vaddr(dumper->bo);
+       if (!IS_ERR(dumper->ptr))
+               msm_gem_object_set_name(dumper->bo, "crashdump");
 
-       drm_gem_object_put(dumper->bo);
+       return PTR_ERR_OR_ZERO(dumper->ptr);
 }
 
 static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1326,7 +1317,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
 
        if (a5xx_crashdumper_run(gpu, &dumper)) {
                kfree(a5xx_state->hlsqregs);
-               a5xx_crashdumper_free(gpu, &dumper);
+               msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
                return;
        }
 
@@ -1334,7 +1325,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
        memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
                count * sizeof(u32));
 
-       a5xx_crashdumper_free(gpu, &dumper);
+       msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
 }
 
 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1505,7 +1496,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
        int ret;
 
        if (!pdev) {
-               dev_err(dev->dev, "No A5XX device is defined\n");
+               DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
                return ERR_PTR(-ENXIO);
        }
 
index 7a41e1c147e4208861076d1d6ab0ad9f1cff5a78..70e65c94e52514e1be323313c36e536de26ef76a 100644 (file)
@@ -298,7 +298,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
                MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
                &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
        if (IS_ERR(ptr))
-               goto err;
+               return;
+
+       msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
 
        while (cmds_size > 0) {
                int i;
@@ -317,15 +319,4 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 
        msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
        a5xx_gpu->gpmu_dwords = dwords;
-
-       return;
-err:
-       if (a5xx_gpu->gpmu_iova)
-               msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
-       if (a5xx_gpu->gpmu_bo)
-               drm_gem_object_put(a5xx_gpu->gpmu_bo);
-
-       a5xx_gpu->gpmu_bo = NULL;
-       a5xx_gpu->gpmu_iova = 0;
-       a5xx_gpu->gpmu_dwords = 0;
 }
index 4c357ead1be62956edf987080a40761e1f20d7b3..3d62310a535fbd1e3b746cc6c23bec3d81e1668c 100644 (file)
@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
        if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
                return;
 
-       dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+       DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
        queue_work(priv->wq, &gpu->recover_work);
 }
 
@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
        status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
        if (unlikely(status)) {
                set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
-               dev_err(dev->dev, "%s: Preemption failed to complete\n",
+               DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
                        gpu->name);
                queue_work(priv->wq, &gpu->recover_work);
                return;
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
 
+       msm_gem_object_set_name(bo, "preempt");
+
        a5xx_gpu->preempt_bo[ring->id] = bo;
        a5xx_gpu->preempt_iova[ring->id] = iova;
        a5xx_gpu->preempt[ring->id] = ptr;
@@ -267,18 +269,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int i;
 
-       for (i = 0; i < gpu->nr_rings; i++) {
-               if (!a5xx_gpu->preempt_bo[i])
-                       continue;
-
-               msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
-
-               if (a5xx_gpu->preempt_iova[i])
-                       msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
-
-               drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
-               a5xx_gpu->preempt_bo[i] = NULL;
-       }
+       for (i = 0; i < gpu->nr_rings; i++)
+               msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
 }
 
 void a5xx_preempt_init(struct msm_gpu *gpu)
index a6f7c40454a6ea6a64633a3b202edace4a0bf9da..f44553ec3193577a0ee7f4ff6c8939ea3ff5b63e 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -501,7 +501,7 @@ enum a6xx_vfd_perfcounter_select {
        PERF_VFDP_VS_STAGE_WAVES = 22,
 };
 
-enum a6xx_hslq_perfcounter_select {
+enum a6xx_hlsq_perfcounter_select {
        PERF_HLSQ_BUSY_CYCLES = 0,
        PERF_HLSQ_STALL_CYCLES_UCHE = 1,
        PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
@@ -2959,6 +2959,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
 #define A6XX_GRAS_LRZ_CNTL_ENABLE                              0x00000001
 #define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE                           0x00000002
 #define A6XX_GRAS_LRZ_CNTL_GREATER                             0x00000004
+#define A6XX_GRAS_LRZ_CNTL_UNK3                                        0x00000008
+#define A6XX_GRAS_LRZ_CNTL_UNK4                                        0x00000010
 
 #define REG_A6XX_GRAS_UNKNOWN_8101                             0x00008101
 
@@ -2997,6 +2999,13 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
 #define REG_A6XX_GRAS_UNKNOWN_8110                             0x00008110
 
 #define REG_A6XX_GRAS_2D_BLIT_CNTL                             0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK              0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT             8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+       return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR                         0x00010000
 
 #define REG_A6XX_GRAS_2D_SRC_TL_X                              0x00008401
 #define A6XX_GRAS_2D_SRC_TL_X_X__MASK                          0x00ffff00
@@ -3449,6 +3458,7 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
        return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
 }
 #define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND                   0x00000100
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE                   0x00000400
 #define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK                   0xffff0000
 #define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT                  16
 static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3642,6 +3652,9 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
 #define REG_A6XX_RB_SAMPLE_COUNT_CONTROL                       0x00008891
 #define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY                      0x00000002
 
+#define REG_A6XX_RB_LRZ_CNTL                                   0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE                                        0x00000001
+
 #define REG_A6XX_RB_UNKNOWN_88D0                               0x000088d0
 
 #define REG_A6XX_RB_BLIT_SCISSOR_TL                            0x000088d1
@@ -3674,6 +3687,14 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
        return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
 }
 
+#define REG_A6XX_RB_MSAA_CNTL                                  0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK                                0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT                       3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
 #define REG_A6XX_RB_BLIT_BASE_GMEM                             0x000088d6
 
 #define REG_A6XX_RB_BLIT_DST_INFO                              0x000088d7
@@ -3684,6 +3705,12 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
        return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
 }
 #define A6XX_RB_BLIT_DST_INFO_FLAGS                            0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK                    0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT                   3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
 #define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK               0x00007f80
 #define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT              7
 static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3780,6 +3807,9 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val
 {
        return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
 }
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR                           0x00010000
+
+#define REG_A6XX_RB_UNKNOWN_8C01                               0x00008c01
 
 #define REG_A6XX_RB_2D_DST_INFO                                        0x00008c17
 #define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK                 0x000000ff
@@ -4465,6 +4495,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
 #define REG_A6XX_SP_BLEND_CNTL                                 0x0000a989
 #define A6XX_SP_BLEND_CNTL_ENABLED                             0x00000001
 #define A6XX_SP_BLEND_CNTL_UNK8                                        0x00000100
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE                   0x00000400
 
 #define REG_A6XX_SP_SRGB_CNTL                                  0x0000a98a
 #define A6XX_SP_SRGB_CNTL_SRGB_MRT0                            0x00000001
@@ -4643,6 +4674,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
 
 #define REG_A6XX_SP_UNKNOWN_AB20                               0x0000ab20
 
+#define REG_A6XX_SP_UNKNOWN_ACC0                               0x0000acc0
+
 #define REG_A6XX_SP_UNKNOWN_AE00                               0x0000ae00
 
 #define REG_A6XX_SP_UNKNOWN_AE03                               0x0000ae03
@@ -4700,11 +4733,34 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
        return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
 }
 #define A6XX_SP_PS_2D_SRC_INFO_FLAGS                           0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER                          0x00010000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE                             0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK                     0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT                    0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+       return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK                    0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT                   15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+       return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
 
 #define REG_A6XX_SP_PS_2D_SRC_LO                               0x0000b4c2
 
 #define REG_A6XX_SP_PS_2D_SRC_HI                               0x0000b4c3
 
+#define REG_A6XX_SP_PS_2D_SRC_PITCH                            0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK                    0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT                   9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+       return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
 #define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO                         0x0000b4ca
 
 #define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI                         0x0000b4cb
@@ -5033,6 +5089,12 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
 {
        return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
 }
+#define A6XX_TEX_CONST_0_SAMPLES__MASK                         0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT                                20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+       return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
 #define A6XX_TEX_CONST_0_FMT__MASK                             0x3fc00000
 #define A6XX_TEX_CONST_0_FMT__SHIFT                            22
 static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -5365,5 +5427,9 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
 
 #define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2                 0x00000030
 
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0                   0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1                   0x00000002
+
 
 #endif /* A6XX_XML */
index d4e98e5876bc4a7fbab76c9760c2e548afdfea73..c58e953fefa3b25343e1b7d3ebeb8ce1277c8b8f 100644 (file)
@@ -51,10 +51,31 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+       u32 val;
+
+       /* This can be called from gpu state code so make sure GMU is valid */
+       if (IS_ERR_OR_NULL(gmu->mmio))
+               return false;
+
+       val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+       return !(val &
+               (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+               A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
 /* Check to see if the GX rail is still powered */
-static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
 {
-       u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+       u32 val;
+
+       /* This can be called from gpu state code so make sure GMU is valid */
+       if (IS_ERR_OR_NULL(gmu->mmio))
+               return false;
+
+       val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
 
        return !(val &
                (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
@@ -153,7 +174,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
                val == 0xbabeface, 100, 10000);
 
        if (ret)
-               dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+               DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
 
        return ret;
 }
@@ -168,7 +189,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
                val & 1, 100, 10000);
        if (ret)
-               dev_err(gmu->dev, "Unable to start the HFI queues\n");
+               DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
 
        return ret;
 }
@@ -209,7 +230,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
                val & (1 << ack), 100, 10000);
 
        if (ret)
-               dev_err(gmu->dev,
+               DRM_DEV_ERROR(gmu->dev,
                        "Timeout waiting for GMU OOB set %s: 0x%x\n",
                                name,
                                gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +272,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
                (val & 0x38) == 0x28, 1, 100);
 
        if (ret) {
-               dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+               DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
        }
 
@@ -273,7 +294,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
                (val & 0x04), 100, 10000);
 
        if (ret)
-               dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+               DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
                        gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
 }
 
@@ -317,7 +338,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
                /* Check to see if the GMU really did slumber */
                if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
                        != 0x0f) {
-                       dev_err(gmu->dev, "The GMU did not go into slumber\n");
+                       DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
                        ret = -ETIMEDOUT;
                }
        }
@@ -339,23 +360,27 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
                val & (1 << 1), 100, 10000);
        if (ret) {
-               dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+               DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
                return ret;
        }
 
        ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
                !val, 100, 10000);
 
-       if (!ret) {
-               gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
-
-               /* Re-enable the power counter */
-               gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
-               return 0;
+       if (ret) {
+               DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+               return ret;
        }
 
-       dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
-       return ret;
+       gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+       /* Set up CX GMU counter 0 to count busy ticks */
+       gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+       gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+       /* Enable the power counter */
+       gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+       return 0;
 }
 
 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -368,7 +393,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
        ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
                val, val & (1 << 16), 100, 10000);
        if (ret)
-               dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+               DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
 
        gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
 }
@@ -520,7 +545,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 
                /* Sanity check the size of the firmware that was loaded */
                if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
-                       dev_err(gmu->dev,
+                       DRM_DEV_ERROR(gmu->dev,
                                "GMU firmware is bigger than the available region\n");
                        return -EINVAL;
                }
@@ -764,7 +789,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
                 */
 
                if (ret)
-                       dev_err(gmu->dev,
+                       DRM_DEV_ERROR(gmu->dev,
                                "Unable to slumber GMU: status = 0%x/0%x\n",
                                gmu_read(gmu,
                                        REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +868,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
                        IOMMU_READ | IOMMU_WRITE);
 
                if (ret) {
-                       dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+                       DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
 
                        for (i = i - 1 ; i >= 0; i--)
                                iommu_unmap(gmu->domain,
@@ -969,12 +994,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
                }
 
                if (j == pri_count) {
-                       dev_err(dev,
+                       DRM_DEV_ERROR(dev,
                               "Level %u not found in the RPMh list\n",
                                        level);
-                       dev_err(dev, "Available levels:\n");
+                       DRM_DEV_ERROR(dev, "Available levels:\n");
                        for (j = 0; j < pri_count; j++)
-                               dev_err(dev, "  %u\n", pri[j]);
+                               DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
 
                        return -EINVAL;
                }
@@ -1081,7 +1106,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
         */
        ret = dev_pm_opp_of_add_table(gmu->dev);
        if (ret) {
-               dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+               DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
                return ret;
        }
 
@@ -1122,13 +1147,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
                        IORESOURCE_MEM, name);
 
        if (!res) {
-               dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+               DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
                return ERR_PTR(-EINVAL);
        }
 
        ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!ret) {
-               dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+               DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1145,7 +1170,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
        ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
                name, gmu);
        if (ret) {
-               dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+               DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
                return ret;
        }
 
index 35f765afae45b7e427faa837e51e56f021a6d487..c721d9165d8ec61a4ba0efd833de3bc67b74d40d 100644 (file)
@@ -164,4 +164,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
 int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
 void a6xx_hfi_stop(struct a6xx_gmu *gmu);
 
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+
 #endif
index db56f263ed77fa35a9551d4b3b60726dfd9b6c3b..1cc1c135236b1a85a710de55a70f8c00ca6637ff 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
index 631257c297fd0db6bb74565f76859d5419c1f25a..fefe773c989e0760d0a29fee2c0b7224aa6d74ef 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "msm_gem.h"
 #include "msm_mmu.h"
+#include "msm_gpu_trace.h"
 #include "a6xx_gpu.h"
 #include "a6xx_gmu.xml.h"
 
@@ -67,13 +68,36 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
        gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
 }
 
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+               u64 iova)
+{
+       OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+       OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+       OUT_RING(ring, lower_32_bits(iova));
+       OUT_RING(ring, upper_32_bits(iova));
+}
+
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_file_private *ctx)
 {
+       unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;
 
+       get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+               rbmemptr_stats(ring, index, cpcycles_start));
+
+       /*
+        * For PM4 the GMU register offsets are calculated from the base of the
+        * GPU registers so we need to add 0x1a800 to the register value on A630
+        * to get the right value from PM4.
+        */
+       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+               rbmemptr_stats(ring, index, alwayson_start));
+
        /* Invalidate CCU depth and color */
        OUT_PKT7(ring, CP_EVENT_WRITE, 1);
        OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -98,6 +122,11 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                }
        }
 
+       get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+               rbmemptr_stats(ring, index, cpcycles_end));
+       get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+               rbmemptr_stats(ring, index, alwayson_end));
+
        /* Write the fence to the scratch register */
        OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
        OUT_RING(ring, submit->seqno);
@@ -112,6 +141,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
        OUT_RING(ring, submit->seqno);
 
+       trace_msm_gpu_submit_flush(submit,
+               gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+                       REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+
        a6xx_flush(gpu, ring);
 }
 
@@ -300,6 +333,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
 
                        return ret;
                }
+
+               msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
        }
 
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -387,14 +422,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        /* Select CP0 to always count cycles */
        gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
 
-       /* FIXME: not sure if this should live here or in a6xx_gmu.c */
-       gmu_write(&a6xx_gpu->gmu,  REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
-               0xff000000);
-       gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
-               0xff, 0x20);
-       gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
-               0x01);
-
        gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
        gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
        gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -481,7 +508,7 @@ out:
 
 static void a6xx_dump(struct msm_gpu *gpu)
 {
-       dev_info(&gpu->pdev->dev, "status:   %08x\n",
+       DRM_DEV_INFO(&gpu->pdev->dev, "status:   %08x\n",
                        gpu_read(gpu, REG_A6XX_RBBM_STATUS));
        adreno_dump(gpu);
 }
@@ -498,7 +525,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
        adreno_dump_info(gpu);
 
        for (i = 0; i < 8; i++)
-               dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+               DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
                        gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
 
        if (hang_debug)
@@ -645,33 +672,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
        REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
 };
 
-static const u32 a6xx_registers[] = {
-       0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
-       0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
-       0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
-       0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
-       0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
-       0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
-       0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
-       0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
-       0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
-       0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
-       0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
-       0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
-       0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
-       0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
-       0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
-       0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
-       0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
-       0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
-       0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
-       0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
-       0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
-       0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
-       0xa610, 0xa617, 0xa630, 0xa630,
-       ~0
-};
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -724,14 +724,6 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        return 0;
 }
 
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
-               struct drm_printer *p)
-{
-       adreno_show(gpu, state, p);
-}
-#endif
-
 static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -746,8 +738,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
        if (a6xx_gpu->sqe_bo) {
-               if (a6xx_gpu->sqe_iova)
-                       msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+               msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
                drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
        }
 
@@ -796,6 +787,8 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_busy = a6xx_gpu_busy,
                .gpu_get_freq = a6xx_gmu_get_freq,
                .gpu_set_freq = a6xx_gmu_set_freq,
+               .gpu_state_get = a6xx_gpu_state_get,
+               .gpu_state_put = a6xx_gpu_state_put,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
@@ -817,7 +810,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
        adreno_gpu = &a6xx_gpu->base;
        gpu = &adreno_gpu->base;
 
-       adreno_gpu->registers = a6xx_registers;
+       adreno_gpu->registers = NULL;
        adreno_gpu->reg_offsets = a6xx_register_offsets;
 
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
index 4127dcebc20254a960d4ad565353fa370f69ae8a..528a4cfe07cda3f5d0963ed341f7856c19d93b6f 100644 (file)
@@ -56,6 +56,14 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
 
 int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
 void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
 void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
 unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+               struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
 #endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644 (file)
index 0000000..e686331
--- /dev/null
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+       const void *handle;
+       u32 *data;
+};
+
+struct a6xx_gpu_state {
+       struct msm_gpu_state base;
+
+       struct a6xx_gpu_state_obj *gmu_registers;
+       int nr_gmu_registers;
+
+       struct a6xx_gpu_state_obj *registers;
+       int nr_registers;
+
+       struct a6xx_gpu_state_obj *shaders;
+       int nr_shaders;
+
+       struct a6xx_gpu_state_obj *clusters;
+       int nr_clusters;
+
+       struct a6xx_gpu_state_obj *dbgahb_clusters;
+       int nr_dbgahb_clusters;
+
+       struct a6xx_gpu_state_obj *indexed_regs;
+       int nr_indexed_regs;
+
+       struct a6xx_gpu_state_obj *debugbus;
+       int nr_debugbus;
+
+       struct a6xx_gpu_state_obj *vbif_debugbus;
+
+       struct a6xx_gpu_state_obj *cx_debugbus;
+       int nr_cx_debugbus;
+
+       struct list_head objs;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+       in[0] = val;
+       in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+       return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+       in[0] = target;
+       in[1] = (((u64) reg) << 44 | dwords);
+
+       return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+       in[0] = 0;
+       in[1] = 0;
+
+       return 2;
+}
+
+struct a6xx_crashdumper {
+       void *ptr;
+       struct drm_gem_object *bo;
+       u64 iova;
+};
+
+struct a6xx_state_memobj {
+       struct list_head node;
+       unsigned long long data[];
+};
+
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+       struct a6xx_state_memobj *obj =
+               kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+       if (!obj)
+               return NULL;
+
+       list_add_tail(&obj->node, &a6xx_state->objs);
+       return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+               size_t size)
+{
+       void *dst = state_kcalloc(a6xx_state, 1, size);
+
+       if (dst)
+               memcpy(dst, src, size);
+       return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE  (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+               struct a6xx_crashdumper *dumper)
+{
+       dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+               SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+               &dumper->bo, &dumper->iova);
+
+       if (!IS_ERR(dumper->ptr))
+               msm_gem_object_set_name(dumper->bo, "crashdump");
+
+       return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+               struct a6xx_crashdumper *dumper)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       u32 val;
+       int ret;
+
+       if (IS_ERR_OR_NULL(dumper->ptr))
+               return -EINVAL;
+
+       if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+               return -EINVAL;
+
+       /* Make sure all pending memory writes are posted */
+       wmb();
+
+       gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+               REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+       gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+       ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+               val & 0x02, 100, 10000);
+
+       gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+       return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+               u32 *data)
+{
+       u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+               A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+       /* Wait 1 us to make sure the data is flowing */
+       udelay(1);
+
+       data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+       data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+       return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+       msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+       msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+               u32 *data)
+{
+       u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+               A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+       cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+       cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+       cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+       cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+       /* Wait 1 us to make sure the data is flowing */
+       udelay(1);
+
+       data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+       data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+       return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+               u32 reg, int count, u32 *data)
+{
+       int i;
+
+       gpu_write(gpu, ctrl0, reg);
+
+       for (i = 0; i < count; i++) {
+               gpu_write(gpu, ctrl1, i);
+               data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+       }
+
+       return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+       ((16 * AXI_ARB_BLOCKS) + \
+        (18 * XIN_AXI_BLOCKS) + \
+        (12 * XIN_CORE_BLOCKS))
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               struct a6xx_gpu_state_obj *obj)
+{
+       u32 clk, *ptr;
+       int i;
+
+       obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+               sizeof(u32));
+       if (!obj->data)
+               return;
+
+       obj->handle = NULL;
+
+       /* Get the current clock setting */
+       clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+       /* Force on the bus so we can read it */
+       gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+               clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+       /* We will read from BUS2 first, so disable BUS1 */
+       gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+       /* Enable the VBIF bus for reading */
+       gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+       ptr = obj->data;
+
+       for (i = 0; i < AXI_ARB_BLOCKS; i++)
+               ptr += vbif_debugbus_read(gpu,
+                       REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+                       REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+                       1 << (i + 16), 16, ptr);
+
+       for (i = 0; i < XIN_AXI_BLOCKS; i++)
+               ptr += vbif_debugbus_read(gpu,
+                       REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+                       REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+                       1 << i, 18, ptr);
+
+       /* Stop BUS2 so we can turn on BUS1 */
+       gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+       for (i = 0; i < XIN_CORE_BLOCKS; i++)
+               ptr += vbif_debugbus_read(gpu,
+                       REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+                       REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+                       1 << i, 12, ptr);
+
+       /* Restore the VBIF clock setting */
+       gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_debugbus_block *block,
+               struct a6xx_gpu_state_obj *obj)
+{
+       int i;
+       u32 *ptr;
+
+       obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+       if (!obj->data)
+               return;
+
+       obj->handle = block;
+
+       for (ptr = obj->data, i = 0; i < block->count; i++)
+               ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_debugbus_block *block,
+               struct a6xx_gpu_state_obj *obj)
+{
+       int i;
+       u32 *ptr;
+
+       obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+       if (!obj->data)
+               return;
+
+       obj->handle = block;
+
+       for (ptr = obj->data, i = 0; i < block->count; i++)
+               ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state)
+{
+       struct resource *res;
+       void __iomem *cxdbg = NULL;
+
+       /* Set up the GX debug bus */
+
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+               A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+               A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+       gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+       /* Set up the CX debug bus - it lives elsewhere in the system so do a
+        * temporary ioremap for the registers
+        */
+       res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+                       "cx_dbgc");
+
+       if (res)
+               cxdbg = ioremap(res->start, resource_size(res));
+
+       if (cxdbg) {
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT,
+                       A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM,
+                       A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0,
+                       0x76543210);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1,
+                       0xFEDCBA98);
+
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+               cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+       }
+
+       a6xx_state->debugbus = state_kcalloc(a6xx_state,
+               ARRAY_SIZE(a6xx_debugbus_blocks),
+               sizeof(*a6xx_state->debugbus));
+
+       if (a6xx_state->debugbus) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+                       a6xx_get_debugbus_block(gpu,
+                               a6xx_state,
+                               &a6xx_debugbus_blocks[i],
+                               &a6xx_state->debugbus[i]);
+
+               a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+       }
+
+       a6xx_state->vbif_debugbus =
+               state_kcalloc(a6xx_state, 1,
+                       sizeof(*a6xx_state->vbif_debugbus));
+
+       if (a6xx_state->vbif_debugbus)
+               a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+                       a6xx_state->vbif_debugbus);
+
+       if (cxdbg) {
+               a6xx_state->cx_debugbus =
+                       state_kcalloc(a6xx_state,
+                       ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+                       sizeof(*a6xx_state->cx_debugbus));
+
+               if (a6xx_state->cx_debugbus) {
+                       int i;
+
+                       for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+                               a6xx_get_cx_debugbus_block(cxdbg,
+                                       a6xx_state,
+                                       &a6xx_cx_debugbus_blocks[i],
+                                       &a6xx_state->cx_debugbus[i]);
+
+                       a6xx_state->nr_cx_debugbus =
+                               ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+               }
+
+               iounmap(cxdbg);
+       }
+}
+
+/* Number of registers in the inclusive [start, end] pair at index a */
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
+
+/* Read a data cluster from behind the AHB aperture
+ *
+ * Builds a crashdumper script that captures one copy of the cluster's
+ * register ranges per hardware context, runs it, and attaches the result to
+ * obj.  On any failure obj is simply left empty.
+ */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_dbgahb_cluster *dbgahb,
+               struct a6xx_gpu_state_obj *obj,
+               struct a6xx_crashdumper *dumper)
+{
+       u64 *in = dumper->ptr;
+       u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+       size_t datasize;
+       int i, regcount = 0;
+
+       for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+               int j;
+
+               /* Select the aperture for this context */
+               in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+                       (dbgahb->statetype + i * 2) << 8);
+
+               for (j = 0; j < dbgahb->count; j += 2) {
+                       int count = RANGE(dbgahb->registers, j);
+                       u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+                               dbgahb->registers[j] - (dbgahb->base >> 2);
+
+                       in += CRASHDUMP_READ(in, offset, count, out);
+
+                       out += count * sizeof(u32);
+
+                       /* Every context reads the same ranges; count once */
+                       if (i == 0)
+                               regcount += count;
+               }
+       }
+
+       CRASHDUMP_FINI(in);
+
+       datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+       /* Bail rather than overflow the crashdumper data buffer */
+       if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+               return;
+
+       if (a6xx_crashdumper_run(gpu, dumper))
+               return;
+
+       obj->handle = dbgahb;
+       obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+               datasize);
+}
+
+/* Capture every known debug AHB cluster into the snapshot */
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               struct a6xx_crashdumper *dumper)
+{
+       int i;
+
+       a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+               ARRAY_SIZE(a6xx_dbgahb_clusters),
+               sizeof(*a6xx_state->dbgahb_clusters));
+
+       if (!a6xx_state->dbgahb_clusters)
+               return;
+
+       a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+               a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+                       &a6xx_dbgahb_clusters[i],
+                       &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper
+ *
+ * Captures one copy of the cluster's register ranges per hardware context.
+ * The result is attached to obj; on any failure obj is left empty.
+ */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_cluster *cluster,
+               struct a6xx_gpu_state_obj *obj,
+               struct a6xx_crashdumper *dumper)
+{
+       u64 *in = dumper->ptr;
+       u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+       size_t datasize;
+       int i, regcount = 0;
+
+       /* Some clusters need a selector register to be programmed too */
+       if (cluster->sel_reg)
+               in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+       for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+               int j;
+
+               /* Point the CP aperture at this cluster and context */
+               in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+                       (cluster->id << 8) | (i << 4) | i);
+
+               for (j = 0; j < cluster->count; j += 2) {
+                       int count = RANGE(cluster->registers, j);
+
+                       in += CRASHDUMP_READ(in, cluster->registers[j],
+                               count, out);
+
+                       out += count * sizeof(u32);
+
+                       /* Every context reads the same ranges; count once */
+                       if (i == 0)
+                               regcount += count;
+               }
+       }
+
+       CRASHDUMP_FINI(in);
+
+       datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+       /* Bail rather than overflow the crashdumper data buffer */
+       if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+               return;
+
+       if (a6xx_crashdumper_run(gpu, dumper))
+               return;
+
+       obj->handle = cluster;
+       obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+               datasize);
+}
+
+/* Capture every known CP data cluster into the snapshot */
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               struct a6xx_crashdumper *dumper)
+{
+       int i;
+
+       a6xx_state->clusters = state_kcalloc(a6xx_state,
+               ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+       if (!a6xx_state->clusters)
+               return;
+
+       a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+               a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+                       &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper
+ *
+ * Captures block->size dwords from each shader bank into a single buffer
+ * attached to obj.  On any failure obj is left empty.
+ */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_shader_block *block,
+               struct a6xx_gpu_state_obj *obj,
+               struct a6xx_crashdumper *dumper)
+{
+       u64 *in = dumper->ptr;
+       size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+       int i;
+
+       /* Bail rather than overflow the crashdumper data buffer */
+       if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+               return;
+
+       for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+               /* Select block type and bank, then dump the block contents */
+               in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+                       (block->type << 8) | i);
+
+               in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+                       block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+       }
+
+       CRASHDUMP_FINI(in);
+
+       if (a6xx_crashdumper_run(gpu, dumper))
+               return;
+
+       obj->handle = block;
+       obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+               datasize);
+}
+
+/* Capture every known shader / debug block into the snapshot */
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               struct a6xx_crashdumper *dumper)
+{
+       int i;
+
+       a6xx_state->shaders = state_kcalloc(a6xx_state,
+               ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+       if (!a6xx_state->shaders)
+               return;
+
+       a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+               a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+                       &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper
+ *
+ * regs->val1 selects the HLSQ state type; regs->val0 is the AHB base of the
+ * block, used to translate register offsets into aperture offsets.  The
+ * captured registers are attached to obj; on failure obj is left empty.
+ */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_registers *regs,
+               struct a6xx_gpu_state_obj *obj,
+               struct a6xx_crashdumper *dumper)
+
+{
+       u64 *in = dumper->ptr;
+       u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+       int i, regcount = 0;
+
+       in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+       for (i = 0; i < regs->count; i += 2) {
+               u32 count = RANGE(regs->registers, i);
+               u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+                       regs->registers[i] - (regs->val0 >> 2);
+
+               in += CRASHDUMP_READ(in, offset, count, out);
+
+               out += count * sizeof(u32);
+               regcount += count;
+       }
+
+       CRASHDUMP_FINI(in);
+
+       /* Bail rather than overflow the crashdumper data buffer */
+       if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+               return;
+
+       if (a6xx_crashdumper_run(gpu, dumper))
+               return;
+
+       obj->handle = regs;
+       obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+               regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper
+ *
+ * If regs->val0 is set it names a selector register to program with
+ * regs->val1 before reading.  The captured registers are attached to obj;
+ * on failure obj is left empty.
+ */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_registers *regs,
+               struct a6xx_gpu_state_obj *obj,
+               struct a6xx_crashdumper *dumper)
+
+{
+       u64 *in = dumper->ptr;
+       u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+       int i, regcount = 0;
+
+       /* Some blocks might need to program a selector register first */
+       if (regs->val0)
+               in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+       for (i = 0; i < regs->count; i += 2) {
+               u32 count = RANGE(regs->registers, i);
+
+               in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+               out += count * sizeof(u32);
+               regcount += count;
+       }
+
+       CRASHDUMP_FINI(in);
+
+       /* Bail rather than overflow the crashdumper data buffer */
+       if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+               return;
+
+       if (a6xx_crashdumper_run(gpu, dumper))
+               return;
+
+       obj->handle = regs;
+       obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+               regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB
+ *
+ * Reads each register in regs directly through gpu_read() and stores the
+ * values in a freshly allocated buffer attached to obj.
+ */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_registers *regs,
+               struct a6xx_gpu_state_obj *obj)
+{
+       int i, regcount = 0, index = 0;
+
+       /* First pass: total number of registers to size the buffer */
+       for (i = 0; i < regs->count; i += 2)
+               regcount += RANGE(regs->registers, i);
+
+       obj->handle = (const void *) regs;
+       obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+       if (!obj->data)
+               return;
+
+       for (i = 0; i < regs->count; i += 2) {
+               u32 count = RANGE(regs->registers, i);
+               int j;
+
+               for (j = 0; j < count; j++)
+                       obj->data[index++] = gpu_read(gpu,
+                               regs->registers[i] + j);
+       }
+}
+
+/* Read a block of GMU registers
+ *
+ * Same shape as a6xx_get_ahb_gpu_registers() but the reads go through
+ * gmu_read() against the GMU register space.
+ */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_registers *regs,
+               struct a6xx_gpu_state_obj *obj)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+       int i, regcount = 0, index = 0;
+
+       /* First pass: total number of registers to size the buffer */
+       for (i = 0; i < regs->count; i += 2)
+               regcount += RANGE(regs->registers, i);
+
+       obj->handle = (const void *) regs;
+       obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+       if (!obj->data)
+               return;
+
+       for (i = 0; i < regs->count; i += 2) {
+               u32 count = RANGE(regs->registers, i);
+               int j;
+
+               for (j = 0; j < count; j++)
+                       obj->data[index++] = gmu_read(gmu,
+                               regs->registers[i] + j);
+       }
+}
+
+/*
+ * Capture the two GMU register lists: the CX list is always readable, the
+ * second list is only captured when the GX domain is powered on.
+ */
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+       a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+               2, sizeof(*a6xx_state->gmu_registers));
+
+       if (!a6xx_state->gmu_registers)
+               return;
+
+       a6xx_state->nr_gmu_registers = 2;
+
+       /* Get the CX GMU registers from AHB */
+       _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+               &a6xx_state->gmu_registers[0]);
+
+       /* The second list requires GX power; skip it if GX is off */
+       if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+               return;
+
+       /* Set the fence to ALLOW mode so we can access the registers */
+       gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+       _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+               &a6xx_state->gmu_registers[1]);
+}
+
+/*
+ * Capture all GPU register lists: plain AHB lists first, then the lists
+ * that need the crashdumper, then the HLSQ-aperture lists.  All entries
+ * share one flat registers[] array in the state object.
+ */
+static void a6xx_get_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               struct a6xx_crashdumper *dumper)
+{
+       int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+               ARRAY_SIZE(a6xx_reglist) +
+               ARRAY_SIZE(a6xx_hlsq_reglist);
+       int index = 0;
+
+       a6xx_state->registers = state_kcalloc(a6xx_state,
+               count, sizeof(*a6xx_state->registers));
+
+       if (!a6xx_state->registers)
+               return;
+
+       a6xx_state->nr_registers = count;
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+               a6xx_get_ahb_gpu_registers(gpu,
+                       a6xx_state, &a6xx_ahb_reglist[i],
+                       &a6xx_state->registers[index++]);
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+               a6xx_get_crashdumper_registers(gpu,
+                       a6xx_state, &a6xx_reglist[i],
+                       &a6xx_state->registers[index++],
+                       dumper);
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+               a6xx_get_crashdumper_hlsq_registers(gpu,
+                       a6xx_state, &a6xx_hlsq_reglist[i],
+                       &a6xx_state->registers[index++],
+                       dumper);
+}
+
+/* Read a block of data from an indexed register pair
+ *
+ * Writes 0 to the address register and then reads indexed->count dwords
+ * from the data register, which auto-increments the internal address.
+ */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state,
+               const struct a6xx_indexed_registers *indexed,
+               struct a6xx_gpu_state_obj *obj)
+{
+       int i;
+
+       obj->handle = (const void *) indexed;
+       obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+       if (!obj->data)
+               return;
+
+       /* All the indexed banks start at address 0 */
+       gpu_write(gpu, indexed->addr, 0);
+
+       /* Read the data - each read increments the internal address by 1 */
+       for (i = 0; i < indexed->count; i++)
+               obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+/*
+ * Capture all indexed register banks plus one extra slot for the CP
+ * mempool dump (hence the + 1 on count).
+ */
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+               struct a6xx_gpu_state *a6xx_state)
+{
+       u32 mempool_size;
+       int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+       int i;
+
+       /*
+        * Size by the element type, not the pointer: the previous
+        * sizeof(a6xx_state->indexed_regs) was the size of the pointer and
+        * under-allocated the array, so the writes below went out of bounds.
+        */
+       a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+               sizeof(*a6xx_state->indexed_regs));
+       if (!a6xx_state->indexed_regs)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+               a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+                       &a6xx_state->indexed_regs[i]);
+
+       /* Set the CP mempool size to 0 to stabilize it while dumping */
+       mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+       gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+       /* Get the contents of the CP mempool (fills the extra slot at [i]) */
+       a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+               &a6xx_state->indexed_regs[i]);
+
+       /*
+        * Offset 0x2000 in the mempool is the size - copy the saved size over
+        * so the data is consistent
+        */
+       a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+       /* Restore the size in the hardware */
+       gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+       a6xx_state->nr_indexed_regs = count;
+}
+
+/*
+ * Build a complete GPU state snapshot.  Returns ERR_PTR(-ENOMEM) on
+ * allocation failure; otherwise a snapshot whose completeness depends on
+ * GX power and crashdumper availability.
+ */
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+       struct a6xx_crashdumper dumper = { 0 };
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+               GFP_KERNEL);
+
+       if (!a6xx_state)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&a6xx_state->objs);
+
+       /* Get the generic state from the adreno core */
+       adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+       a6xx_get_gmu_registers(gpu, a6xx_state);
+
+       /* If GX isn't on the rest of the data isn't going to be accessible */
+       if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+               return &a6xx_state->base;
+
+       /* Get the banks of indexed registers */
+       a6xx_get_indexed_registers(gpu, a6xx_state);
+
+       /* Try to initialize the crashdumper */
+       if (!a6xx_crashdumper_init(gpu, &dumper)) {
+               a6xx_get_registers(gpu, a6xx_state, &dumper);
+               a6xx_get_shaders(gpu, a6xx_state, &dumper);
+               a6xx_get_clusters(gpu, a6xx_state, &dumper);
+               a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+
+               /* Done with the crashdumper scratch buffer */
+               msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+       }
+
+       a6xx_get_debugbus(gpu, a6xx_state);
+
+       return  &a6xx_state->base;
+}
+
+/*
+ * kref release callback: free every tracked state allocation, then the
+ * generic adreno state, then the state object itself.
+ */
+void a6xx_gpu_state_destroy(struct kref *kref)
+{
+       struct a6xx_state_memobj *obj, *tmp;
+       struct msm_gpu_state *state = container_of(kref,
+                       struct msm_gpu_state, ref);
+       struct a6xx_gpu_state *a6xx_state = container_of(state,
+                       struct a6xx_gpu_state, base);
+
+       list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
+               kfree(obj);
+
+       adreno_gpu_state_destroy(state);
+       kfree(a6xx_state);
+}
+
+/* Drop a reference on the state; tolerate NULL / error pointers */
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+       if (IS_ERR_OR_NULL(state))
+               return 1;
+
+       return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+/*
+ * Print captured register values as offset/value pairs.  registers holds
+ * inclusive [start, end] dword-offset pairs and count is the number of
+ * entries in that table; data holds the captured values in table order.
+ */
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+               struct drm_printer *p)
+{
+       int i, index = 0;
+
+       if (!data)
+               return;
+
+       for (i = 0; i < count; i += 2) {
+               /* Renamed from 'count' to avoid shadowing the parameter */
+               u32 len = RANGE(registers, i);
+               u32 offset = registers[i];
+               int j;
+
+               for (j = 0; j < len; index++, offset++, j++) {
+                       /*
+                        * Skip entries still holding 0xdeafbead -
+                        * presumably the fill value for registers that were
+                        * not captured; TODO confirm against the dumper setup
+                        */
+                       if (data[index] == 0xdeafbead)
+                               continue;
+
+                       drm_printf(p, "  - { offset: 0x%06x, value: 0x%08x }\n",
+                               offset << 2, data[index]);
+               }
+       }
+}
+
+/*
+ * Emit up to len bytes of data as an ascii85-encoded YAML scalar.
+ * Trailing all-zero dwords are trimmed first; nothing is printed if the
+ * buffer is entirely zero.
+ */
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+       char out[ASCII85_BUFSZ];
+       long i, l, datalen = 0;
+
+       /* Find the last non-zero dword to avoid encoding trailing zeros */
+       for (i = 0; i < len >> 2; i++) {
+               if (data[i])
+                       datalen = (i + 1) << 2;
+       }
+
+       if (datalen == 0)
+               return;
+
+       drm_puts(p, "    data: !!ascii85 |\n");
+       drm_puts(p, "      ");
+
+
+       l = ascii85_encode_len(datalen);
+
+       for (i = 0; i < l; i++)
+               drm_puts(p, ascii85_encode(data[i], out));
+
+       drm_puts(p, "\n");
+}
+
+/* Print a "<fmt><name>\n" label line, e.g. "  - type: SP_INST_DATA" */
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+       drm_puts(p, fmt);
+       drm_puts(p, name);
+       drm_puts(p, "\n");
+}
+
+/* Print one captured shader block, one section per shader bank */
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+               struct drm_printer *p)
+{
+       const struct a6xx_shader_block *block = obj->handle;
+       int i;
+
+       /* A NULL handle means this block was never captured */
+       if (!obj->handle)
+               return;
+
+       print_name(p, "  - type: ", block->name);
+
+       for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+               drm_printf(p, "    - bank: %d\n", i);
+               drm_printf(p, "      size: %d\n", block->size);
+
+               if (!obj->data)
+                       continue;
+
+               /* Each bank's data is block->size dwords long */
+               print_ascii85(p, block->size << 2,
+                       obj->data + (block->size * i));
+       }
+}
+
+/*
+ * Print per-context cluster register data as offset/value pairs.  size is
+ * the number of entries in the registers range table; data holds one copy
+ * of the ranges per hardware context.
+ */
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+               struct drm_printer *p)
+{
+       int ctx, index = 0;
+
+       for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+               int j;
+
+               drm_printf(p, "    - context: %d\n", ctx);
+
+               for (j = 0; j < size; j += 2) {
+                       u32 count = RANGE(registers, j);
+                       u32 offset = registers[j];
+                       int k;
+
+                       for (k = 0; k < count; index++, offset++, k++) {
+                               /*
+                                * Skip entries still holding 0xdeafbead -
+                                * presumably uncaptured registers; TODO
+                                * confirm against the dumper setup
+                                */
+                               if (data[index] == 0xdeafbead)
+                                       continue;
+
+                               drm_printf(p, "      - { offset: 0x%06x, value: 0x%08x }\n",
+                                       offset << 2, data[index]);
+                       }
+               }
+       }
+}
+
+/* Print one captured debug AHB cluster (no-op if it was never captured) */
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+               struct drm_printer *p)
+{
+       const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+       if (dbgahb) {
+               print_name(p, "  - cluster-name: ", dbgahb->name);
+               a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+                       obj->data, p);
+       }
+}
+
+/* Print one captured CP data cluster (no-op if it was never captured) */
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+               struct drm_printer *p)
+{
+       const struct a6xx_cluster *cluster = obj->handle;
+
+       if (cluster) {
+               print_name(p, "  - cluster-name: ", cluster->name);
+               a6xx_show_cluster_data(cluster->registers, cluster->count,
+                       obj->data, p);
+       }
+}
+
+/* Print one captured indexed register bank as an ascii85 blob */
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+               struct drm_printer *p)
+{
+       const struct a6xx_indexed_registers *indexed = obj->handle;
+
+       if (!indexed)
+               return;
+
+       print_name(p, "  - regs-name: ", indexed->name);
+       drm_printf(p, "    dwords: %d\n", indexed->count);
+
+       /* count is in dwords; print_ascii85 takes a byte length */
+       print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+/* Print one captured debug bus block as an ascii85 blob */
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+               u32 *data, struct drm_printer *p)
+{
+       if (block) {
+               print_name(p, "  - debugbus-block: ", block->name);
+
+               /*
+                * count for regular debugbus data is in quadwords,
+                * but print the size in dwords for consistency
+                */
+               drm_printf(p, "    count: %d\n", block->count << 1);
+
+               /* count quadwords -> bytes for print_ascii85 */
+               print_ascii85(p, block->count << 3, data);
+       }
+}
+
+/* Print every captured debug bus section: GX blocks, VBIF, then CX blocks */
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+               struct drm_printer *p)
+{
+       int i;
+
+       for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+               struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+               a6xx_show_debugbus_block(obj->handle, obj->data, p);
+       }
+
+       if (a6xx_state->vbif_debugbus) {
+               struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+               drm_puts(p, "  - debugbus-block: A6XX_DBGBUS_VBIF\n");
+               drm_printf(p, "    count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+               /* vbif debugbus data is in dwords.  Confusing, huh? */
+               print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+       }
+
+       for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+               struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+               a6xx_show_debugbus_block(obj->handle, obj->data, p);
+       }
+}
+
+/*
+ * Top-level snapshot printer: emits the generic adreno state followed by
+ * every a6xx-specific section.  Safe to call with a NULL or error state.
+ */
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+               struct drm_printer *p)
+{
+       struct a6xx_gpu_state *a6xx_state = container_of(state,
+                       struct a6xx_gpu_state, base);
+       int i;
+
+       /* container_of on a bad pointer is harmless - no deref before this */
+       if (IS_ERR_OR_NULL(state))
+               return;
+
+       adreno_show(gpu, state, p);
+
+       drm_puts(p, "registers:\n");
+       for (i = 0; i < a6xx_state->nr_registers; i++) {
+               struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+               const struct a6xx_registers *regs = obj->handle;
+
+               if (!obj->handle)
+                       continue;
+
+               a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+       }
+
+       drm_puts(p, "registers-gmu:\n");
+       for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+               struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+               const struct a6xx_registers *regs = obj->handle;
+
+               if (!obj->handle)
+                       continue;
+
+               a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+       }
+
+       drm_puts(p, "indexed-registers:\n");
+       for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+               a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+       drm_puts(p, "shader-blocks:\n");
+       for (i = 0; i < a6xx_state->nr_shaders; i++)
+               a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+       drm_puts(p, "clusters:\n");
+       for (i = 0; i < a6xx_state->nr_clusters; i++)
+               a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+       for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+               a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+       drm_puts(p, "debugbus:\n");
+       a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644 (file)
index 0000000..68cccfa
--- /dev/null
@@ -0,0 +1,430 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
+static const u32 a6xx_gras_cluster[] = {
+       0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+       0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+       0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+       0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+       0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+       0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+       0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+       0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+       0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+       0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+       0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+       0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+       0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE    0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS  3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS    5
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+       { .id = _id, .name = #_id,\
+               .registers = _reg, \
+               .count = ARRAY_SIZE(_reg), \
+               .sel_reg = _sel_reg, .sel_val = _sel_val }
+
+static const struct a6xx_cluster {
+       u32 id;
+       const char *name;
+       const u32 *registers;
+       size_t count;
+       u32 sel_reg;
+       u32 sel_val;
+} a6xx_clusters[] = {
+       CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+       CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+       CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+       CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+       CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+       CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+       0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+       0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+       0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+       0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+       0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+       0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+       0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+       0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+       0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+       0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+       0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+       0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+       0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+       0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+       0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+       { .name = #_id, .statetype = _type, .base = _base, \
+               .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+       const char *name;
+       u32 statetype;
+       u32 base;
+       const u32 *registers;
+       size_t count;
+} a6xx_dbgahb_clusters[] = {
+       CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+       CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+       0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+       0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+       0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+       0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+       0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+       const u32 *registers;
+       size_t count;
+       u32 val0;
+       u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+       { .val0 = _base, .val1 = _type, .registers = _array, \
+               .count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+       HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+       HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+       HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+       { .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+       const char *name;
+       u32 type;
+       u32 size;
+} a6xx_shader_blocks[] = {
+       SHADER(A6XX_TP0_TMO_DATA, 0x200),
+       SHADER(A6XX_TP0_SMO_DATA, 0x80),
+       SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+       SHADER(A6XX_TP1_TMO_DATA, 0x200),
+       SHADER(A6XX_TP1_SMO_DATA, 0x80),
+       SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+       SHADER(A6XX_SP_INST_DATA, 0x800),
+       SHADER(A6XX_SP_LB_0_DATA, 0x800),
+       SHADER(A6XX_SP_LB_1_DATA, 0x800),
+       SHADER(A6XX_SP_LB_2_DATA, 0x800),
+       SHADER(A6XX_SP_LB_3_DATA, 0x800),
+       SHADER(A6XX_SP_LB_4_DATA, 0x800),
+       SHADER(A6XX_SP_LB_5_DATA, 0x200),
+       SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+       SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+       SHADER(A6XX_SP_UAV_DATA, 0x80),
+       SHADER(A6XX_SP_INST_TAG, 0x80),
+       SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+       SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+       SHADER(A6XX_SP_SMO_TAG, 0x80),
+       SHADER(A6XX_SP_STATE_DATA, 0x3f),
+       SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+       SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+       SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+       SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+       SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+       SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+       SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+       SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+       SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+       SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+       SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+       SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+       SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+       SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+       SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+       SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+       SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+       SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+       SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+       SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+       SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+       0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+       0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+       0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+       0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+       /* RBBM */
+       0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+       0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+       0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+       0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+       0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+       0x0540, 0x0555,
+       /* CP */
+       0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+       0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+       0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+       0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+       0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+       0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+       0x0a00, 0x0a03,
+       /* VSC */
+       0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+       /* UCHE */
+       0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+       0x0e38, 0x0e39,
+       /* GRAS */
+       0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+       0x8630, 0x8637,
+       /* VPC */
+       0x9600, 0x9604, 0x9624, 0x9637,
+       /* PC */
+       0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+       0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+       0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+       /* VFD */
+       0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+       0xa630, 0xa630,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+       { .registers = _array, .count = ARRAY_SIZE(_array), \
+               .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+       REGS(a6xx_registers, 0, 0),
+       REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+       REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
+
+static const u32 a6xx_ahb_registers[] = {
+       /* RBBM_STATUS - RBBM_STATUS3 */
+       0x210, 0x213,
+       /* CP_STATUS_1 */
+       0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+       0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+       0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+       0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+       0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+       0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+       0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+       0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+       0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+       0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+       0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+       0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+       REGS(a6xx_ahb_registers, 0, 0),
+       REGS(a6xx_vbif_registers, 0, 0),
+};
+
+static const u32 a6xx_gmu_gx_registers[] = {
+       /* GMU GX */
+       0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+       0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+       0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+       0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+       0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+       /* GMU CX */
+       0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+       0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+       0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+       0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+       0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+       0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+       0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+       0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+       0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+       /* GPU RSCC */
+       0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
+       0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
+       0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
+       /* GMU AO */
+       0x9300, 0x9316, 0x9400, 0x9400,
+       /* GPU CC */
+       0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+       0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+       0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+       0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+       0xb800, 0xb802,
+       /* GPU CC ACD */
+       0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+       REGS(a6xx_gmu_cx_registers, 0, 0),
+       REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+       const char *name;
+       u32 addr;
+       u32 data;
+       u32 count;
+} a6xx_indexed_reglist[] = {
+       { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+               REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+       { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+               REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+       { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+               REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+       { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+               REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+       "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+               REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+       const char *name;
+       u32 id;
+       u32 count;
+} a6xx_debugbus_blocks[] = {
+       DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+       DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+       DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif
index 6ff9baec2658cf7876f4806c99c0936c689f123a..eda11abc5f011f1a8ef8bab3feb19274d6c0277f 100644 (file)
@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
                val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
 
        if (ret) {
-               dev_err(gmu->dev,
+               DRM_DEV_ERROR(gmu->dev,
                        "Message %s id %d timed out waiting for response\n",
                        a6xx_hfi_msg_id[id], seqnum);
                return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
 
                /* If the queue is empty our response never made it */
                if (!ret) {
-                       dev_err(gmu->dev,
+                       DRM_DEV_ERROR(gmu->dev,
                                "The HFI response queue is unexpectedly empty\n");
 
                        return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
                        struct a6xx_hfi_msg_error *error =
                                (struct a6xx_hfi_msg_error *) &resp;
 
-                       dev_err(gmu->dev, "GMU firmware error %d\n",
+                       DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
                                error->code);
                        continue;
                }
 
                if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
-                       dev_err(gmu->dev,
+                       DRM_DEV_ERROR(gmu->dev,
                                "Unexpected message id %d on the response queue\n",
                                HFI_HEADER_SEQNUM(resp.ret_header));
                        continue;
                }
 
                if (resp.error) {
-                       dev_err(gmu->dev,
+                       DRM_DEV_ERROR(gmu->dev,
                                "Message %s id %d returned error %d\n",
                                a6xx_hfi_msg_id[id], seqnum, resp.error);
                        return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
 
        ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
        if (ret) {
-               dev_err(gmu->dev, "Unable to send message %s id %d\n",
+               DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
                        a6xx_hfi_msg_id[id], seqnum);
                return ret;
        }
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
                        continue;
 
                if (queue->header->read_index != queue->header->write_index)
-                       dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+                       DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
 
                queue->header->read_index = 0;
                queue->header->write_index = 0;
index 1318959d504d05f7a5c4ce4237ed36ac3a12194a..641d3ba477b650ec2bae119f99136e6910007b71 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -339,6 +339,15 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
 #define REG_AXXX_CP_STATE_DEBUG_DATA                           0x000001ed
 
 #define REG_AXXX_CP_INT_CNTL                                   0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK                           0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK                  0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK                     0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK             0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK               0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK                         0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK                          0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK                          0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK                           0x80000000
 
 #define REG_AXXX_CP_INT_STATUS                                 0x000001f3
 
index 86abdb2b3a9cdddc31e986163e17bc0f7c49bd5e..714ed6505e47bda1d107412e058d2c00c5877afa 100644 (file)
@@ -27,6 +27,39 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
 
 static const struct adreno_info gpulist[] = {
        {
+               .rev   = ADRENO_REV(2, 0, 0, 0),
+               .revn  = 200,
+               .name  = "A200",
+               .fw = {
+                       [ADRENO_FW_PM4] = "yamato_pm4.fw",
+                       [ADRENO_FW_PFP] = "yamato_pfp.fw",
+               },
+               .gmem  = SZ_256K,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+               .init  = a2xx_gpu_init,
+       }, { /* a200 on i.mx51 has only 128kib gmem */
+               .rev   = ADRENO_REV(2, 0, 0, 1),
+               .revn  = 201,
+               .name  = "A200",
+               .fw = {
+                       [ADRENO_FW_PM4] = "yamato_pm4.fw",
+                       [ADRENO_FW_PFP] = "yamato_pfp.fw",
+               },
+               .gmem  = SZ_128K,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+               .init  = a2xx_gpu_init,
+       }, {
+               .rev   = ADRENO_REV(2, 2, 0, ANY_ID),
+               .revn  = 220,
+               .name  = "A220",
+               .fw = {
+                       [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+                       [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+               },
+               .gmem  = SZ_512K,
+               .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+               .init  = a2xx_gpu_init,
+       }, {
                .rev   = ADRENO_REV(3, 0, 5, ANY_ID),
                .revn  = 305,
                .name  = "A305",
@@ -196,7 +229,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
-               dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
                return NULL;
        }
 
@@ -205,7 +238,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
        mutex_unlock(&dev->struct_mutex);
        pm_runtime_put_autosuspend(&pdev->dev);
        if (ret) {
-               dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
                return NULL;
        }
 
@@ -238,7 +271,8 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
        if (ret == 0) {
                unsigned int r, patch;
 
-               if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+               if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+                   sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
                        rev->core = r / 100;
                        r %= 100;
                        rev->major = r / 10;
@@ -253,7 +287,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
        /* and if that fails, fall back to legacy "qcom,chipid" property: */
        ret = of_property_read_u32(node, "qcom,chipid", &chipid);
        if (ret) {
-               dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+               DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
                return ret;
        }
 
@@ -274,6 +308,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
        static struct adreno_platform_config config = {};
        const struct adreno_info *info;
        struct drm_device *drm = dev_get_drvdata(master);
+       struct msm_drm_private *priv = drm->dev_private;
        struct msm_gpu *gpu;
        int ret;
 
@@ -296,6 +331,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
        DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
                config.rev.minor, config.rev.patchid);
 
+       priv->is_a2xx = config.rev.core == 2;
+
        gpu = info->init(drm);
        if (IS_ERR(gpu)) {
                dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -323,9 +360,37 @@ static const struct component_ops a3xx_ops = {
                .unbind = adreno_unbind,
 };
 
+static void adreno_device_register_headless(void)
+{
+       /* on imx5, we don't have a top-level mdp/dpu node
+        * this creates a dummy node for the driver for that case
+        */
+       struct platform_device_info dummy_info = {
+               .parent = NULL,
+               .name = "msm",
+               .id = -1,
+               .res = NULL,
+               .num_res = 0,
+               .data = NULL,
+               .size_data = 0,
+               .dma_mask = ~0,
+       };
+       platform_device_register_full(&dummy_info);
+}
+
 static int adreno_probe(struct platform_device *pdev)
 {
-       return component_add(&pdev->dev, &a3xx_ops);
+
+       int ret;
+
+       ret = component_add(&pdev->dev, &a3xx_ops);
+       if (ret)
+               return ret;
+
+       if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+               adreno_device_register_headless();
+
+       return 0;
 }
 
 static int adreno_remove(struct platform_device *pdev)
@@ -337,6 +402,8 @@ static int adreno_remove(struct platform_device *pdev)
 static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,adreno-3xx" },
+       /* for compatibility with imx5 gpu: */
+       { .compatible = "amd,imageon" },
        /* for backwards compat w/ downstream kgsl DT files: */
        { .compatible = "qcom,kgsl-3d0" },
        {}
index 93d70f4a2154e289be09a8dd6ae8a038e007ec7b..2e4372ef17a34fd2fc3028c6f8a543477ce54247 100644 (file)
@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 
                ret = request_firmware_direct(&fw, newname, drm->dev);
                if (!ret) {
-                       dev_info(drm->dev, "loaded %s from new location\n",
+                       DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
                                newname);
                        adreno_gpu->fwloc = FW_LOCATION_NEW;
                        goto out;
                } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-                       dev_err(drm->dev, "failed to load %s: %d\n",
+                       DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
                                newname, ret);
                        fw = ERR_PTR(ret);
                        goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 
                ret = request_firmware_direct(&fw, fwname, drm->dev);
                if (!ret) {
-                       dev_info(drm->dev, "loaded %s from legacy location\n",
+                       DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
                                newname);
                        adreno_gpu->fwloc = FW_LOCATION_LEGACY;
                        goto out;
                } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-                       dev_err(drm->dev, "failed to load %s: %d\n",
+                       DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
                                fwname, ret);
                        fw = ERR_PTR(ret);
                        goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 
                ret = request_firmware(&fw, newname, drm->dev);
                if (!ret) {
-                       dev_info(drm->dev, "loaded %s with helper\n",
+                       DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
                                newname);
                        adreno_gpu->fwloc = FW_LOCATION_HELPER;
                        goto out;
                } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
-                       dev_err(drm->dev, "failed to load %s: %d\n",
+                       DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
                                newname, ret);
                        fw = ERR_PTR(ret);
                        goto out;
                }
        }
 
-       dev_err(drm->dev, "failed to load %s\n", fwname);
+       DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
        fw = ERR_PTR(-ENOENT);
 out:
        kfree(newname);
@@ -209,14 +209,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
                if (!ring)
                        continue;
 
-               ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
-               if (ret) {
-                       ring->iova = 0;
-                       dev_err(gpu->dev->dev,
-                               "could not map ringbuffer %d: %d\n", i, ret);
-                       return ret;
-               }
-
                ring->cur = ring->start;
                ring->next = ring->start;
 
@@ -277,7 +269,7 @@ void adreno_recover(struct msm_gpu *gpu)
 
        ret = msm_gpu_hw_init(gpu);
        if (ret) {
-               dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
                /* hmm, oh well? */
        }
 }
@@ -319,16 +311,27 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                 */
                OUT_PKT3(ring, CP_EVENT_WRITE, 1);
                OUT_RING(ring, HLSQ_FLUSH);
-
-               OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
-               OUT_RING(ring, 0x00000000);
        }
 
-       /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
-       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-       OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
-       OUT_RING(ring, rbmemptr(ring, fence));
-       OUT_RING(ring, submit->seqno);
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       if (!adreno_is_a2xx(adreno_gpu)) {
+               /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+               OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+               OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+               OUT_RING(ring, rbmemptr(ring, fence));
+               OUT_RING(ring, submit->seqno);
+       } else {
+               /* BIT(31) means something else on a2xx */
+               OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+               OUT_RING(ring, CACHE_FLUSH_TS);
+               OUT_RING(ring, rbmemptr(ring, fence));
+               OUT_RING(ring, submit->seqno);
+               OUT_PKT3(ring, CP_INTERRUPT, 1);
+               OUT_RING(ring, 0x80000000);
+       }
 
 #if 0
        if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +409,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
                                size = j + 1;
 
                if (size) {
-                       state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+                       state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
                        if (state->ring[i].data) {
                                memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
                                state->ring[i].data_size = size << 2;
@@ -414,6 +417,10 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
                }
        }
 
+       /* Some targets prefer to collect their own registers */
+       if (!adreno_gpu->registers)
+               return 0;
+
        /* Count the number of registers */
        for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
                count += adreno_gpu->registers[i + 1] -
@@ -445,7 +452,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(state->ring); i++)
-               kfree(state->ring[i].data);
+               kvfree(state->ring[i].data);
 
        for (i = 0; state->bos && i < state->nr_bos; i++)
                kvfree(state->bos[i].data);
@@ -475,34 +482,74 @@ int adreno_gpu_state_put(struct msm_gpu_state *state)
 
 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 
-static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
 {
+       void *buf;
+       size_t buf_itr = 0, buffer_size;
        char out[ASCII85_BUFSZ];
-       long l, datalen, i;
+       long l;
+       int i;
 
-       if (!ptr || !len)
-               return;
+       if (!src || !len)
+               return NULL;
+
+       l = ascii85_encode_len(len);
 
        /*
-        * Only dump the non-zero part of the buffer - rarely will any data
-        * completely fill the entire allocated size of the buffer
+        * Ascii85 outputs either a 5 byte string or a 1 byte string. So we
+        * account for the worst case of 5 bytes per dword plus the 1 for '\0'
         */
-       for (datalen = 0, i = 0; i < len >> 2; i++) {
-               if (ptr[i])
-                       datalen = (i << 2) + 1;
-       }
+       buffer_size = (l * 5) + 1;
+
+       buf = kvmalloc(buffer_size, GFP_KERNEL);
+       if (!buf)
+               return NULL;
+
+       for (i = 0; i < l; i++)
+               buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+                               ascii85_encode(src[i], out));
+
+       return buf;
+}
 
-       /* Skip printing the object if it is empty */
-       if (datalen == 0)
+/* len is expected to be in bytes */
+static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+               bool *encoded)
+{
+       if (!*ptr || !len)
                return;
 
-       l = ascii85_encode_len(datalen);
+       if (!*encoded) {
+               long datalen, i;
+               u32 *buf = *ptr;
+
+               /*
+                * Only dump the non-zero part of the buffer - rarely will
+                * any data completely fill the entire allocated size of
+                * the buffer.
+                */
+               for (datalen = 0, i = 0; i < len >> 2; i++)
+                       if (buf[i])
+                               datalen = ((i + 1) << 2);
+
+               /*
+                * If we reach here, then the originally captured binary buffer
+                * will be replaced with the ascii85 encoded string
+                */
+               *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+               kvfree(buf);
+
+               *encoded = true;
+       }
+
+       if (!*ptr)
+               return;
 
        drm_puts(p, "    data: !!ascii85 |\n");
        drm_puts(p, "     ");
 
-       for (i = 0; i < l; i++)
-               drm_puts(p, ascii85_encode(ptr[i], out));
+       drm_puts(p, *ptr);
 
        drm_puts(p, "\n");
 }
@@ -534,8 +581,8 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
                drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
                drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);
 
-               adreno_show_object(p, state->ring[i].data,
-                       state->ring[i].data_size);
+               adreno_show_object(p, &state->ring[i].data,
+                       state->ring[i].data_size, &state->ring[i].encoded);
        }
 
        if (state->bos) {
@@ -546,17 +593,19 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
                                state->bos[i].iova);
                        drm_printf(p, "    size: %zd\n", state->bos[i].size);
 
-                       adreno_show_object(p, state->bos[i].data,
-                               state->bos[i].size);
+                       adreno_show_object(p, &state->bos[i].data,
+                               state->bos[i].size, &state->bos[i].encoded);
                }
        }
 
-       drm_puts(p, "registers:\n");
+       if (state->nr_registers) {
+               drm_puts(p, "registers:\n");
 
-       for (i = 0; i < state->nr_registers; i++) {
-               drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
-                       state->registers[i * 2] << 2,
-                       state->registers[(i * 2) + 1]);
+               for (i = 0; i < state->nr_registers; i++) {
+                       drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
+                               state->registers[i * 2] << 2,
+                               state->registers[(i * 2) + 1]);
+               }
        }
 }
 #endif
@@ -595,6 +644,9 @@ void adreno_dump(struct msm_gpu *gpu)
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        int i;
 
+       if (!adreno_gpu->registers)
+               return;
+
        /* dump these out in a form that can be parsed by demsm: */
        printk("IO:region %s 00000000 00020000\n", gpu->name);
        for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +687,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
 
        node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
        if (!node) {
-               dev_err(dev, "Could not find the GPU powerlevels\n");
+               DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
                return -ENXIO;
        }
 
@@ -674,7 +726,7 @@ static int adreno_get_pwrlevels(struct device *dev,
        else {
                ret = dev_pm_opp_of_add_table(dev);
                if (ret)
-                       dev_err(dev, "Unable to set the OPP table\n");
+                       DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
        }
 
        if (!ret) {
@@ -717,6 +769,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
        adreno_gpu_config.va_start = SZ_16M;
        adreno_gpu_config.va_end = 0xffffffff;
+       /* maximum range of a2xx mmu */
+       if (adreno_is_a2xx(adreno_gpu))
+               adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
 
        adreno_gpu_config.nr_rings = nr_rings;
 
index de6e6ee42fba139070dde05d68d39d6a0c470231..5db459bc28a730cf61ad27e73a64edafdf899b60 100644 (file)
@@ -21,6 +21,7 @@
 #define __ADRENO_GPU_H__
 
 #include <linux/firmware.h>
+#include <linux/iopoll.h>
 
 #include "msm_gpu.h"
 
@@ -154,6 +155,20 @@ struct adreno_platform_config {
        __ret;                                             \
 })
 
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+       return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+       return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+       return gpu->revn == 225;
+}
 
 static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
 {
@@ -334,6 +349,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
                gpu_write(&gpu->base, reg - 1, data);
 }
 
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +391,9 @@ static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
        ((1 << 30) | (1 << 29) | \
        ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
 
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+       readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+               interval, timeout)
+
 #endif /* __ADRENO_GPU_H__ */
index 15eb03bed984689404d75be4458b302295fd5efe..79b907ac0b4b5c725ae0e2895fbaac08e56d5dc3 100644 (file)
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/envytools/rnndb/adreno.xml               (    501 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/freedreno_copyright.xml  (   1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml          (  42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml (  14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml    (  43052 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a3xx.xml          (  83840 bytes, from 2018-07-03 19:37:13)
 - /home/robclark/src/envytools/rnndb/adreno/a4xx.xml          ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml          ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml          ( 140790 bytes, from 2018-12-02 17:29:54)
 - /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml      (  10431 bytes, from 2018-09-14 13:03:07)
 - /home/robclark/src/envytools/rnndb/adreno/ocmem.xml         (   1773 bytes, from 2018-07-03 19:37:13)
 
@@ -108,6 +108,13 @@ enum pc_di_src_sel {
        DI_SRC_SEL_RESERVED = 3,
 };
 
+enum pc_di_face_cull_sel {
+       DI_FACE_CULL_NONE = 0,
+       DI_FACE_CULL_FETCH = 1,
+       DI_FACE_BACKFACE_CULL = 2,
+       DI_FACE_FRONTFACE_CULL = 3,
+};
+
 enum pc_di_index_size {
        INDEX_SIZE_IGN = 0,
        INDEX_SIZE_16_BIT = 0,
@@ -356,6 +363,7 @@ enum a6xx_render_mode {
        RM6_GMEM = 4,
        RM6_BLIT2D = 5,
        RM6_RESOLVE = 6,
+       RM6_BLIT2DSCALE = 12,
 };
 
 enum pseudo_reg {
index 879c13fe74e05a1f4249f2ba1ad0591da3c8a237..e45c69044935696c13cb1ddd46f77d3c536d29be 100644 (file)
@@ -319,10 +319,8 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
        unsigned long irq_flags;
        int i, irq_count, enable_count, cb_count;
 
-       if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
-               DPU_ERROR("invalid parameters\n");
+       if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
                return 0;
-       }
 
        for (i = 0; i < irq_obj->total_irqs; i++) {
                spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +341,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 
 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
 
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
-               struct dentry *parent)
-{
-       dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
-                       parent, &dpu_kms->irq_obj,
-                       &dpu_debugfs_core_irq_fops);
-
-       return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
-       debugfs_remove(dpu_kms->irq_obj.debugfs_file);
-       dpu_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
 {
-       return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
+       debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+               &dpu_debugfs_core_irq_fops);
 }
 #endif
 
@@ -376,10 +354,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
        struct msm_drm_private *priv;
        int i;
 
-       if (!dpu_kms) {
-               DPU_ERROR("invalid dpu_kms\n");
-               return;
-       } else if (!dpu_kms->dev) {
+       if (!dpu_kms->dev) {
                DPU_ERROR("invalid drm device\n");
                return;
        } else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +385,12 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
        }
 }
 
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
-{
-       return 0;
-}
-
 void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
 {
        struct msm_drm_private *priv;
        int i;
 
-       if (!dpu_kms) {
-               DPU_ERROR("invalid dpu_kms\n");
-               return;
-       } else if (!dpu_kms->dev) {
+       if (!dpu_kms->dev) {
                DPU_ERROR("invalid drm device\n");
                return;
        } else if (!dpu_kms->dev->dev_private) {
index 5e98bba46af53059bb06381c5b8f786f60a57ed8..e9015a2b23fe040b4a4803df25acd069d346d999 100644 (file)
  */
 void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
 
-/**
- * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
- * @dpu_kms:           DPU handle
- * @return:            0 if success; error code otherwise
- */
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
-
 /**
  * dpu_core_irq_uninstall - uninstall core IRQ handler
  * @dpu_kms:           DPU handle
@@ -139,15 +132,8 @@ int dpu_core_irq_unregister_callback(
  * dpu_debugfs_core_irq_init - register core irq debugfs
  * @dpu_kms: pointer to kms
  * @parent: debugfs directory root
- * @Return: 0 on success
  */
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
                struct dentry *parent);
 
-/**
- * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
- * @dpu_kms: pointer to kms
- */
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
-
 #endif /* __DPU_CORE_IRQ_H__ */
index 41c5191f9056c9047bd07521b3abf9607149c1dc..9f20f397f77da858e028577e16c78158ff7afafc 100644 (file)
@@ -24,8 +24,6 @@
 #include "dpu_crtc.h"
 #include "dpu_core_perf.h"
 
-#define DPU_PERF_MODE_STRING_SIZE      128
-
 /**
  * enum dpu_perf_mode - performance tuning mode
  * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +55,20 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
        return to_dpu_kms(priv->kms);
 }
 
-static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
-       return dpu_crtc_is_enabled(crtc);
-}
-
 static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
 {
        struct drm_crtc *tmp_crtc;
-       bool intf_connected = false;
-
-       if (!crtc)
-               goto end;
 
        drm_for_each_crtc(tmp_crtc, crtc->dev) {
                if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
-                               _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+                               tmp_crtc->enabled) {
                        DPU_DEBUG("video interface connected crtc:%d\n",
                                tmp_crtc->base.id);
-                       intf_connected = true;
-                       goto end;
+                       return true;
                }
        }
 
-end:
-       return intf_connected;
+       return false;
 }
 
 static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -101,20 +88,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
        memset(perf, 0, sizeof(struct dpu_core_perf_params));
 
        if (!dpu_cstate->bw_control) {
-               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                        perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
                                        1000ULL;
                        perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
                }
                perf->core_clk_rate = kms->perf.max_core_clk_rate;
        } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
-               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                        perf->bw_ctl[i] = 0;
                        perf->max_per_pipe_ib[i] = 0;
                }
                perf->core_clk_rate = 0;
        } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
-               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                        perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
                        perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
                }
@@ -124,12 +111,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
        DPU_DEBUG(
                "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
                        crtc->base.id, perf->core_clk_rate,
-                       perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
-                       perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
-                       perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
-                       perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
-                       perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
-                       perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+                       perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+                       perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+                       perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+                       perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+                       perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
+                       perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
 }
 
 int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -164,13 +151,13 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
        /* obtain new values */
        _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
 
-       for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
-                       i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+       for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+                       i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
                curr_client_type = dpu_crtc_get_client_type(crtc);
 
                drm_for_each_crtc(tmp_crtc, crtc->dev) {
-                       if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+                       if (tmp_crtc->enabled &&
                            (dpu_crtc_get_client_type(tmp_crtc) ==
                                            curr_client_type) &&
                            (tmp_crtc != crtc)) {
@@ -229,7 +216,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
        int ret = 0;
 
        drm_for_each_crtc(tmp_crtc, crtc->dev) {
-               if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+               if (tmp_crtc->enabled &&
                        curr_client_type ==
                                dpu_crtc_get_client_type(tmp_crtc)) {
                        dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
@@ -286,7 +273,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
         */
        if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
                drm_for_each_crtc(tmp_crtc, crtc->dev) {
-                       if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+                       if (tmp_crtc->enabled &&
                                dpu_crtc_get_intf_mode(tmp_crtc) ==
                                                INTF_MODE_VIDEO)
                                return;
@@ -296,7 +283,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
        if (kms->perf.enable_bw_release) {
                trace_dpu_cmd_release_bw(crtc->base.id);
                DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
-               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+               for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                        dpu_crtc->cur_perf.bw_ctl[i] = 0;
                        _dpu_core_perf_crtc_update_bus(kms, crtc, i);
                }
@@ -321,7 +308,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
        struct dpu_crtc_state *dpu_cstate;
 
        drm_for_each_crtc(crtc, kms->dev) {
-               if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+               if (crtc->enabled) {
                        dpu_cstate = to_dpu_crtc_state(crtc->state);
                        clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
                                                        clk_rate);
@@ -372,8 +359,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
        old = &dpu_crtc->cur_perf;
        new = &dpu_cstate->new_perf;
 
-       if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
-               for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+       if (crtc->enabled && !stop_req) {
+               for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                        /*
                         * cases for bus bandwidth update.
                         * 1. new bandwidth vote - "ab or ib vote" is higher
@@ -415,13 +402,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
                update_clk = 1;
        }
        trace_dpu_perf_crtc_update(crtc->base.id,
-                               new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
-                               new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
-                               new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+                               new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+                               new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+                               new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
                                new->core_clk_rate, stop_req,
                                update_bus, update_clk);
 
-       for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+       for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
                if (update_bus & BIT(i)) {
                        ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
                        if (ret) {
@@ -462,24 +449,14 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
        struct dpu_core_perf *perf = file->private_data;
        struct dpu_perf_cfg *cfg = &perf->catalog->perf;
        u32 perf_mode = 0;
-       char buf[10];
-
-       if (!perf)
-               return -ENODEV;
-
-       if (count >= sizeof(buf))
-               return -EFAULT;
-
-       if (copy_from_user(buf, user_buf, count))
-               return -EFAULT;
-
-       buf[count] = 0; /* end of string */
+       int ret;
 
-       if (kstrtouint(buf, 0, &perf_mode))
-               return -EFAULT;
+       ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+       if (ret)
+               return ret;
 
        if (perf_mode >= DPU_PERF_MODE_MAX)
-               return -EFAULT;
+               return -EINVAL;
 
        if (perf_mode == DPU_PERF_MODE_FIXED) {
                DRM_INFO("fix performance mode\n");
@@ -504,29 +481,16 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
                        char __user *buff, size_t count, loff_t *ppos)
 {
        struct dpu_core_perf *perf = file->private_data;
-       int len = 0;
-       char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
-
-       if (!perf)
-               return -ENODEV;
+       int len;
+       char buf[128];
 
-       if (*ppos)
-               return 0;       /* the end */
-
-       len = snprintf(buf, sizeof(buf),
+       len = scnprintf(buf, sizeof(buf),
                        "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
                        perf->perf_tune.mode,
                        perf->perf_tune.min_core_clk,
                        perf->perf_tune.min_bus_vote);
-       if (len < 0 || len >= sizeof(buf))
-               return 0;
-
-       if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-               return -EFAULT;
-
-       *ppos += len;   /* increase offset */
 
-       return len;
+       return simple_read_from_buffer(buff, count, ppos, buf, len);
 }
 
 static const struct file_operations dpu_core_perf_mode_fops = {
@@ -535,70 +499,43 @@ static const struct file_operations dpu_core_perf_mode_fops = {
        .write = _dpu_core_perf_mode_write,
 };
 
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-       debugfs_remove_recursive(perf->debugfs_root);
-       perf->debugfs_root = NULL;
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
-               struct dentry *parent)
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
 {
+       struct dpu_core_perf *perf = &dpu_kms->perf;
        struct dpu_mdss_cfg *catalog = perf->catalog;
-       struct msm_drm_private *priv;
-       struct dpu_kms *dpu_kms;
-
-       priv = perf->dev->dev_private;
-       if (!priv || !priv->kms) {
-               DPU_ERROR("invalid KMS reference\n");
-               return -EINVAL;
-       }
+       struct dentry *entry;
 
-       dpu_kms = to_dpu_kms(priv->kms);
-
-       perf->debugfs_root = debugfs_create_dir("core_perf", parent);
-       if (!perf->debugfs_root) {
-               DPU_ERROR("failed to create core perf debugfs\n");
+       entry = debugfs_create_dir("core_perf", parent);
+       if (IS_ERR_OR_NULL(entry))
                return -EINVAL;
-       }
 
-       debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+       debugfs_create_u64("max_core_clk_rate", 0600, entry,
                        &perf->max_core_clk_rate);
-       debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+       debugfs_create_u64("core_clk_rate", 0600, entry,
                        &perf->core_clk_rate);
-       debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+       debugfs_create_u32("enable_bw_release", 0600, entry,
                        (u32 *)&perf->enable_bw_release);
-       debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+       debugfs_create_u32("threshold_low", 0600, entry,
                        (u32 *)&catalog->perf.max_bw_low);
-       debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+       debugfs_create_u32("threshold_high", 0600, entry,
                        (u32 *)&catalog->perf.max_bw_high);
-       debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+       debugfs_create_u32("min_core_ib", 0600, entry,
                        (u32 *)&catalog->perf.min_core_ib);
-       debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+       debugfs_create_u32("min_llcc_ib", 0600, entry,
                        (u32 *)&catalog->perf.min_llcc_ib);
-       debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+       debugfs_create_u32("min_dram_ib", 0600, entry,
                        (u32 *)&catalog->perf.min_dram_ib);
-       debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+       debugfs_create_file("perf_mode", 0600, entry,
                        (u32 *)perf, &dpu_core_perf_mode_fops);
-       debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+       debugfs_create_u64("fix_core_clk_rate", 0600, entry,
                        &perf->fix_core_clk_rate);
-       debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+       debugfs_create_u64("fix_core_ib_vote", 0600, entry,
                        &perf->fix_core_ib_vote);
-       debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+       debugfs_create_u64("fix_core_ab_vote", 0600, entry,
                        &perf->fix_core_ab_vote);
 
        return 0;
 }
-#else
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
-               struct dentry *parent)
-{
-       return 0;
-}
 #endif
 
 void dpu_core_perf_destroy(struct dpu_core_perf *perf)
@@ -608,10 +545,8 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
                return;
        }
 
-       dpu_core_perf_debugfs_destroy(perf);
        perf->max_core_clk_rate = 0;
        perf->core_clk = NULL;
-       perf->phandle = NULL;
        perf->catalog = NULL;
        perf->dev = NULL;
 }
@@ -619,12 +554,10 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
 int dpu_core_perf_init(struct dpu_core_perf *perf,
                struct drm_device *dev,
                struct dpu_mdss_cfg *catalog,
-               struct dpu_power_handle *phandle,
                struct dss_clk *core_clk)
 {
        perf->dev = dev;
        perf->catalog = catalog;
-       perf->phandle = phandle;
        perf->core_clk = core_clk;
 
        perf->max_core_clk_rate = core_clk->max_rate;
index fbcbe0c7527af7c6a4eb7926da2131161c54f076..37f518815eb77a37ef1c08c49dfc9ba148bd0b75 100644 (file)
 #include <drm/drm_crtc.h>
 
 #include "dpu_hw_catalog.h"
-#include "dpu_power_handle.h"
 
 #define        DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE      412500000
 
+/**
+ * enum dpu_core_perf_data_bus_id - data bus identifier
+ * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+ */
+enum dpu_core_perf_data_bus_id {
+       DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+       DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+       DPU_CORE_PERF_DATA_BUS_ID_EBI,
+       DPU_CORE_PERF_DATA_BUS_ID_MAX,
+};
+
 /**
  * struct dpu_core_perf_params - definition of performance parameters
  * @max_per_pipe_ib: maximum instantaneous bandwidth request
@@ -30,8 +42,8 @@
  * @core_clk_rate: core clock rate request
  */
 struct dpu_core_perf_params {
-       u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
-       u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+       u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
+       u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
        u64 core_clk_rate;
 };
 
@@ -52,7 +64,6 @@ struct dpu_core_perf_tune {
  * @dev: Pointer to drm device
  * @debugfs_root: top level debug folder
  * @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
  * @core_clk: Pointer to core clock structure
  * @core_clk_rate: current core clock rate
  * @max_core_clk_rate: maximum allowable core clock rate
@@ -66,7 +77,6 @@ struct dpu_core_perf {
        struct drm_device *dev;
        struct dentry *debugfs_root;
        struct dpu_mdss_cfg *catalog;
-       struct dpu_power_handle *phandle;
        struct dss_clk *core_clk;
        u64 core_clk_rate;
        u64 max_core_clk_rate;
@@ -113,21 +123,20 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
  * @perf: Pointer to core performance context
  * @dev: Pointer to drm device
  * @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
  * @core_clk: pointer to core clock
  */
 int dpu_core_perf_init(struct dpu_core_perf *perf,
                struct drm_device *dev,
                struct dpu_mdss_cfg *catalog,
-               struct dpu_power_handle *phandle,
                struct dss_clk *core_clk);
 
+struct dpu_kms;
+
 /**
  * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
  * @debugfs_parent: Pointer to parent debugfs
  */
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
-               struct dentry *parent);
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
 
 #endif /* _DPU_CORE_PERF_H_ */
index d4530d60767b816605e9edcc2be921c5425e8c63..a6f0c38a0a95e8cd856eb038e6ee3fa94c6698d2 100644 (file)
@@ -33,7 +33,6 @@
 #include "dpu_plane.h"
 #include "dpu_encoder.h"
 #include "dpu_vbif.h"
-#include "dpu_power_handle.h"
 #include "dpu_core_perf.h"
 #include "dpu_trace.h"
 
 #define LEFT_MIXER 0
 #define RIGHT_MIXER 1
 
-static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
-                                           struct drm_display_mode *mode)
-{
-       return mode->hdisplay / cstate->num_mixers;
-}
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 {
        struct msm_drm_private *priv = crtc->dev->dev_private;
 
@@ -69,10 +62,7 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
        if (!crtc)
                return;
 
-       dpu_crtc->phandle = NULL;
-
        drm_crtc_cleanup(crtc);
-       mutex_destroy(&dpu_crtc->crtc_lock);
        kfree(dpu_crtc);
 }
 
@@ -287,16 +277,17 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
                return INTF_MODE_NONE;
        }
 
-       drm_for_each_encoder(encoder, crtc->dev)
-               if (encoder->crtc == crtc)
-                       return dpu_encoder_get_intf_mode(encoder);
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+       /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+       drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+               return dpu_encoder_get_intf_mode(encoder);
 
        return INTF_MODE_NONE;
 }
 
-static void dpu_crtc_vblank_cb(void *data)
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
 {
-       struct drm_crtc *crtc = (struct drm_crtc *)data;
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
        /* keep statistics on vblank callback - with auto reset via debugfs */
@@ -309,6 +300,19 @@ static void dpu_crtc_vblank_cb(void *data)
        trace_dpu_crtc_vblank_cb(DRMID(crtc));
 }
 
+static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
+{
+       int ret = 0;
+       struct drm_modeset_acquire_ctx ctx;
+
+       DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+       dpu_core_perf_crtc_release_bw(crtc);
+       DRM_MODESET_LOCK_ALL_END(ctx, ret);
+       if (ret)
+               DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
+                         ret);
+}
+
 static void dpu_crtc_frame_event_work(struct kthread_work *work)
 {
        struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -338,7 +342,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
                        /* release bandwidth and other resources */
                        trace_dpu_crtc_frame_event_done(DRMID(crtc),
                                                        fevent->event);
-                       dpu_core_perf_crtc_release_bw(crtc);
+                       dpu_crtc_release_bw_unlocked(crtc);
                } else {
                        trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
                                                                fevent->event);
@@ -473,28 +477,21 @@ static void _dpu_crtc_setup_mixer_for_encoder(
 
 static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
 {
-       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct drm_encoder *enc;
 
-       mutex_lock(&dpu_crtc->crtc_lock);
-       /* Check for mixers on all encoders attached to this crtc */
-       list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
-               if (enc->crtc != crtc)
-                       continue;
+       WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
+       /* Check for mixers on all encoders attached to this crtc */
+       drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
                _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
-       }
-
-       mutex_unlock(&dpu_crtc->crtc_lock);
 }
 
 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
-       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
        struct drm_display_mode *adj_mode = &state->adjusted_mode;
-       u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
+       u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
        int i;
 
        for (i = 0; i < cstate->num_mixers; i++) {
@@ -502,7 +499,7 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
                r->x1 = crtc_split_width * i;
                r->y1 = 0;
                r->x2 = r->x1 + crtc_split_width;
-               r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+               r->y2 = adj_mode->vdisplay;
 
                trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
        }
@@ -552,13 +549,9 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               if (encoder->crtc != crtc)
-                       continue;
-
-               /* encoder will trigger pending mask now */
+       /* encoder will trigger pending mask now */
+       drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
                dpu_encoder_trigger_kickoff_pending(encoder);
-       }
 
        /*
         * If no mixers have been allocated in dpu_crtc_atomic_check(),
@@ -702,10 +695,9 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
        return rc;
 }
 
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
 {
        struct drm_encoder *encoder;
-       struct drm_device *dev = crtc->dev;
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
@@ -721,127 +713,59 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
 
        DPU_ATRACE_BEGIN("crtc_commit");
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+       /*
+        * Encoder will flush/start now, unless it has a tx pending. If so, it
+        * may delay and flush at an irq event (e.g. ppdone)
+        */
+       drm_for_each_encoder_mask(encoder, crtc->dev,
+                                 crtc->state->encoder_mask) {
                struct dpu_encoder_kickoff_params params = { 0 };
-
-               if (encoder->crtc != crtc)
-                       continue;
-
-               /*
-                * Encoder will flush/start now, unless it has a tx pending.
-                * If so, it may delay and flush at an irq event (e.g. ppdone)
-                */
-               dpu_encoder_prepare_for_kickoff(encoder, &params);
+               dpu_encoder_prepare_for_kickoff(encoder, &params, async);
        }
 
-       /* wait for frame_event_done completion */
-       DPU_ATRACE_BEGIN("wait_for_frame_done_event");
-       ret = _dpu_crtc_wait_for_frame_done(crtc);
-       DPU_ATRACE_END("wait_for_frame_done_event");
-       if (ret) {
-               DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
-                               crtc->base.id,
-                               atomic_read(&dpu_crtc->frame_pending));
-               goto end;
-       }
 
-       if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
-               /* acquire bandwidth and other resources */
-               DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
-       } else
-               DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+       if (!async) {
+               /* wait for frame_event_done completion */
+               DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+               ret = _dpu_crtc_wait_for_frame_done(crtc);
+               DPU_ATRACE_END("wait_for_frame_done_event");
+               if (ret) {
+                       DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+                                       crtc->base.id,
+                                       atomic_read(&dpu_crtc->frame_pending));
+                       goto end;
+               }
+
+               if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+                       /* acquire bandwidth and other resources */
+                       DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+               } else
+                       DPU_DEBUG("crtc%d commit\n", crtc->base.id);
 
-       dpu_crtc->play_count++;
+               dpu_crtc->play_count++;
+       }
 
        dpu_vbif_clear_errors(dpu_kms);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               if (encoder->crtc != crtc)
-                       continue;
-
-               dpu_encoder_kickoff(encoder);
-       }
+       drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+               dpu_encoder_kickoff(encoder, async);
 
 end:
-       reinit_completion(&dpu_crtc->frame_done_comp);
+       if (!async)
+               reinit_completion(&dpu_crtc->frame_done_comp);
        DPU_ATRACE_END("crtc_commit");
 }
 
-/**
- * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @dpu_crtc: Pointer to dpu crtc structure
- * @enable: Whether to enable/disable vblanks
- */
-static void _dpu_crtc_vblank_enable_no_lock(
-               struct dpu_crtc *dpu_crtc, bool enable)
-{
-       struct drm_crtc *crtc = &dpu_crtc->base;
-       struct drm_device *dev = crtc->dev;
-       struct drm_encoder *enc;
-
-       if (enable) {
-               /* drop lock since power crtc cb may try to re-acquire lock */
-               mutex_unlock(&dpu_crtc->crtc_lock);
-               pm_runtime_get_sync(dev->dev);
-               mutex_lock(&dpu_crtc->crtc_lock);
-
-               list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-                       if (enc->crtc != crtc)
-                               continue;
-
-                       trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
-                                                    DRMID(enc), enable,
-                                                    dpu_crtc);
-
-                       dpu_encoder_register_vblank_callback(enc,
-                                       dpu_crtc_vblank_cb, (void *)crtc);
-               }
-       } else {
-               list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-                       if (enc->crtc != crtc)
-                               continue;
-
-                       trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
-                                                    DRMID(enc), enable,
-                                                    dpu_crtc);
-
-                       dpu_encoder_register_vblank_callback(enc, NULL, NULL);
-               }
-
-               /* drop lock since power crtc cb may try to re-acquire lock */
-               mutex_unlock(&dpu_crtc->crtc_lock);
-               pm_runtime_put_sync(dev->dev);
-               mutex_lock(&dpu_crtc->crtc_lock);
-       }
-}
-
-/**
- * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+static void dpu_crtc_reset(struct drm_crtc *crtc)
 {
-       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
-       DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
-
-       mutex_lock(&dpu_crtc->crtc_lock);
+       struct dpu_crtc_state *cstate;
 
-       /*
-        * If the vblank is enabled, release a power reference on suspend
-        * and take it back during resume (if it is still enabled).
-        */
-       trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
-       if (dpu_crtc->suspend == enable)
-               DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
-                               crtc->base.id, enable);
-       else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
-               _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
-       }
+       if (crtc->state)
+               dpu_crtc_destroy_state(crtc, crtc->state);
 
-       dpu_crtc->suspend = enable;
-       mutex_unlock(&dpu_crtc->crtc_lock);
+       crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
+       if (crtc->state)
+               crtc->state->crtc = crtc;
 }
 
 /**
@@ -873,65 +797,8 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
        return &cstate->base;
 }
 
-/**
- * dpu_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void dpu_crtc_reset(struct drm_crtc *crtc)
-{
-       struct dpu_crtc *dpu_crtc;
-       struct dpu_crtc_state *cstate;
-
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
-
-       /* revert suspend actions, if necessary */
-       if (dpu_kms_is_suspend_state(crtc->dev))
-               _dpu_crtc_set_suspend(crtc, false);
-
-       /* remove previous state, if present */
-       if (crtc->state) {
-               dpu_crtc_destroy_state(crtc, crtc->state);
-               crtc->state = 0;
-       }
-
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
-       if (!cstate) {
-               DPU_ERROR("failed to allocate state\n");
-               return;
-       }
-
-       cstate->base.crtc = crtc;
-       crtc->state = &cstate->base;
-}
-
-static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
-{
-       struct drm_crtc *crtc = arg;
-       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-       struct drm_encoder *encoder;
-
-       mutex_lock(&dpu_crtc->crtc_lock);
-
-       trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
-
-       /* restore encoder; crtc will be programmed during commit */
-       drm_for_each_encoder(encoder, crtc->dev) {
-               if (encoder->crtc != crtc)
-                       continue;
-
-               dpu_encoder_virt_restore(encoder);
-       }
-
-       mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
-static void dpu_crtc_disable(struct drm_crtc *crtc)
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+                            struct drm_crtc_state *old_crtc_state)
 {
        struct dpu_crtc *dpu_crtc;
        struct dpu_crtc_state *cstate;
@@ -951,13 +818,12 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
 
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 
-       if (dpu_kms_is_suspend_state(crtc->dev))
-               _dpu_crtc_set_suspend(crtc, true);
-
        /* Disable/save vblank irq handling */
        drm_crtc_vblank_off(crtc);
 
-       mutex_lock(&dpu_crtc->crtc_lock);
+       drm_for_each_encoder_mask(encoder, crtc->dev,
+                                 old_crtc_state->encoder_mask)
+               dpu_encoder_assign_crtc(encoder, NULL);
 
        /* wait for frame_event_done completion */
        if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -966,10 +832,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
                                atomic_read(&dpu_crtc->frame_pending));
 
        trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
-       if (dpu_crtc->enabled && !dpu_crtc->suspend &&
-                       dpu_crtc->vblank_requested) {
-               _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
-       }
        dpu_crtc->enabled = false;
 
        if (atomic_read(&dpu_crtc->frame_pending)) {
@@ -981,15 +843,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
 
        dpu_core_perf_crtc_update(crtc, 0, true);
 
-       drm_for_each_encoder(encoder, crtc->dev) {
-               if (encoder->crtc != crtc)
-                       continue;
+       drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
                dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
-       }
-
-       if (dpu_crtc->power_event)
-               dpu_power_handle_unregister_event(dpu_crtc->phandle,
-                               dpu_crtc->power_event);
 
        memset(cstate->mixers, 0, sizeof(cstate->mixers));
        cstate->num_mixers = 0;
@@ -998,14 +853,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
        cstate->bw_control = false;
        cstate->bw_split_vote = false;
 
-       mutex_unlock(&dpu_crtc->crtc_lock);
-
        if (crtc->state->event && !crtc->state->active) {
                spin_lock_irqsave(&crtc->dev->event_lock, flags);
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                crtc->state->event = NULL;
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
        }
+
+       pm_runtime_put_sync(crtc->dev->dev);
 }
 
 static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1021,33 +876,23 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
        }
        priv = crtc->dev->dev_private;
 
+       pm_runtime_get_sync(crtc->dev->dev);
+
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
        dpu_crtc = to_dpu_crtc(crtc);
 
-       drm_for_each_encoder(encoder, crtc->dev) {
-               if (encoder->crtc != crtc)
-                       continue;
+       drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
                dpu_encoder_register_frame_event_callback(encoder,
                                dpu_crtc_frame_event_cb, (void *)crtc);
-       }
 
-       mutex_lock(&dpu_crtc->crtc_lock);
        trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
-       if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
-                       dpu_crtc->vblank_requested) {
-               _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
-       }
        dpu_crtc->enabled = true;
 
-       mutex_unlock(&dpu_crtc->crtc_lock);
+       drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+               dpu_encoder_assign_crtc(encoder, crtc);
 
        /* Enable/restore vblank irq handling */
        drm_crtc_vblank_on(crtc);
-
-       dpu_crtc->power_event = dpu_power_handle_register_event(
-               dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
-               dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
-
 }
 
 struct plane_state {
@@ -1101,7 +946,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
        memset(pipe_staged, 0, sizeof(pipe_staged));
 
-       mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
+       mixer_width = mode->hdisplay / cstate->num_mixers;
 
        _dpu_crtc_setup_lm_bounds(crtc, state);
 
@@ -1289,21 +1134,32 @@ end:
 
 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
-       struct dpu_crtc *dpu_crtc;
-
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return -EINVAL;
-       }
-       dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct drm_encoder *enc;
 
-       mutex_lock(&dpu_crtc->crtc_lock);
        trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
-       if (dpu_crtc->enabled && !dpu_crtc->suspend) {
-               _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+
+       /*
+        * Normally we would iterate through encoder_mask in crtc state to find
+        * attached encoders. In this case, we might be disabling vblank _after_
+        * encoder_mask has been cleared.
+        *
+        * Instead, we "assign" a crtc to the encoder in enable and clear it in
+        * disable (which is also after encoder_mask is cleared). So instead of
+        * using encoder mask, we'll ask the encoder to toggle itself iff it's
+        * currently assigned to our crtc.
+        *
+        * Note also that this function cannot be called while crtc is disabled
+        * since we use drm_crtc_vblank_on/off. So we don't need to worry
+        * about the assigned crtcs being inconsistent with the current state
+        * (which means no need to worry about modeset locks).
+        */
+       list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+               trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+                                            dpu_crtc);
+
+               dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
        }
-       dpu_crtc->vblank_requested = en;
-       mutex_unlock(&dpu_crtc->crtc_lock);
 
        return 0;
 }
@@ -1324,18 +1180,14 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
 
        int i, out_width;
 
-       if (!s || !s->private)
-               return -EINVAL;
-
        dpu_crtc = s->private;
        crtc = &dpu_crtc->base;
 
        drm_modeset_lock_all(crtc->dev);
        cstate = to_dpu_crtc_state(crtc->state);
 
-       mutex_lock(&dpu_crtc->crtc_lock);
        mode = &crtc->state->adjusted_mode;
-       out_width = _dpu_crtc_get_mixer_width(cstate, mode);
+       out_width = mode->hdisplay / cstate->num_mixers;
 
        seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
                                mode->hdisplay, mode->vdisplay);
@@ -1420,9 +1272,6 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
                dpu_crtc->vblank_cb_time = ktime_set(0, 0);
        }
 
-       seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
-
-       mutex_unlock(&dpu_crtc->crtc_lock);
        drm_modeset_unlock_all(crtc->dev);
 
        return 0;
@@ -1456,13 +1305,11 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
        seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
        seq_printf(s, "core_clk_rate: %llu\n",
                        dpu_crtc->cur_perf.core_clk_rate);
-       for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
-                       i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
-               seq_printf(s, "bw_ctl[%s]: %llu\n",
-                               dpu_power_handle_get_dbus_name(i),
+       for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+                       i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
+               seq_printf(s, "bw_ctl[%d]: %llu\n", i,
                                dpu_crtc->cur_perf.bw_ctl[i]);
-               seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
-                               dpu_power_handle_get_dbus_name(i),
+               seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
                                dpu_crtc->cur_perf.max_per_pipe_ib[i]);
        }
 
@@ -1472,8 +1319,7 @@ DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
 
 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 {
-       struct dpu_crtc *dpu_crtc;
-       struct dpu_kms *dpu_kms;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
        static const struct file_operations debugfs_status_fops = {
                .open =         _dpu_debugfs_status_open,
@@ -1482,12 +1328,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
                .release =      single_release,
        };
 
-       if (!crtc)
-               return -EINVAL;
-       dpu_crtc = to_dpu_crtc(crtc);
-
-       dpu_kms = _dpu_crtc_get_kms(crtc);
-
        dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
                        crtc->dev->primary->debugfs_root);
        if (!dpu_crtc->debugfs_root)
@@ -1504,25 +1344,11 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 
        return 0;
 }
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-       struct dpu_crtc *dpu_crtc;
-
-       if (!crtc)
-               return;
-       dpu_crtc = to_dpu_crtc(crtc);
-       debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
 #else
 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 {
        return 0;
 }
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
 #endif /* CONFIG_DEBUG_FS */
 
 static int dpu_crtc_late_register(struct drm_crtc *crtc)
@@ -1532,7 +1358,9 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
 
 static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
 {
-       _dpu_crtc_destroy_debugfs(crtc);
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+       debugfs_remove_recursive(dpu_crtc->debugfs_root);
 }
 
 static const struct drm_crtc_funcs dpu_crtc_funcs = {
@@ -1547,7 +1375,7 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
 };
 
 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
-       .disable = dpu_crtc_disable,
+       .atomic_disable = dpu_crtc_disable,
        .atomic_enable = dpu_crtc_enable,
        .atomic_check = dpu_crtc_atomic_check,
        .atomic_begin = dpu_crtc_atomic_begin,
@@ -1574,7 +1402,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
        crtc = &dpu_crtc->base;
        crtc->dev = dev;
 
-       mutex_init(&dpu_crtc->crtc_lock);
        spin_lock_init(&dpu_crtc->spin_lock);
        atomic_set(&dpu_crtc->frame_pending, 0);
 
@@ -1602,8 +1429,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
        /* initialize event handling */
        spin_lock_init(&dpu_crtc->event_lock);
 
-       dpu_crtc->phandle = &kms->phandle;
-
        DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
        return crtc;
 }
index 3723b48303352c2d9ffbd8da0549b156cd05edcb..dbfb38a1986c8e8164e417add9aba1046160ee01 100644 (file)
@@ -132,8 +132,6 @@ struct dpu_crtc_frame_event {
  * @vblank_cb_count : count of vblank callback since last reset
  * @play_count    : frame count between crtc enable and disable
  * @vblank_cb_time  : ktime at vblank count reset
- * @vblank_requested : whether the user has requested vblank events
- * @suspend         : whether or not a suspend operation is in progress
  * @enabled       : whether the DPU CRTC is currently enabled. updated in the
  *                  commit-thread, not state-swap time which is earlier, so
  *                  safe to make decisions on during VBLANK on/off work
@@ -142,7 +140,6 @@ struct dpu_crtc_frame_event {
  * @dirty_list    : list of color processing features are dirty
  * @ad_dirty: list containing ad properties that are dirty
  * @ad_active: list containing ad properties that are active
- * @crtc_lock     : crtc lock around create, destroy and access.
  * @frame_pending : Whether or not an update is pending
  * @frame_events  : static allocation of in-flight frame events
  * @frame_event_list : available frame event list
@@ -152,7 +149,6 @@ struct dpu_crtc_frame_event {
  * @event_worker  : Event worker queue
  * @event_lock    : Spinlock around event handling code
  * @phandle: Pointer to power handler
- * @power_event   : registered power event handle
  * @cur_perf      : current performance committed to clock/bandwidth driver
  */
 struct dpu_crtc {
@@ -168,8 +164,6 @@ struct dpu_crtc {
        u32 vblank_cb_count;
        u64 play_count;
        ktime_t vblank_cb_time;
-       bool vblank_requested;
-       bool suspend;
        bool enabled;
 
        struct list_head feature_list;
@@ -178,8 +172,6 @@ struct dpu_crtc {
        struct list_head ad_dirty;
        struct list_head ad_active;
 
-       struct mutex crtc_lock;
-
        atomic_t frame_pending;
        struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
        struct list_head frame_event_list;
@@ -189,9 +181,6 @@ struct dpu_crtc {
        /* for handling internal event thread */
        spinlock_t event_lock;
 
-       struct dpu_power_handle *phandle;
-       struct dpu_power_event *power_event;
-
        struct dpu_core_perf_params cur_perf;
 
        struct dpu_crtc_smmu_state_data smmu_state;
@@ -237,42 +226,13 @@ struct dpu_crtc_state {
 #define to_dpu_crtc_state(x) \
        container_of(x, struct dpu_crtc_state, base)
 
-/**
- * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
- * @cstate: Pointer to dpu crtc state
- * @Return: true - has two mixers, false - has one mixer
- */
-static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
-{
-       return cstate->num_mixers == CRTC_DUAL_MIXERS;
-}
-
-/**
- * dpu_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height
- */
-static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
-               struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
-       if (!dpu_crtc || !cstate || !mode)
-               return 0;
-
-       return mode->vdisplay;
-}
-
 /**
  * dpu_crtc_frame_pending - retun the number of pending frames
  * @crtc: Pointer to drm crtc object
  */
 static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
 {
-       struct dpu_crtc *dpu_crtc;
-
-       if (!crtc)
-               return -EINVAL;
-
-       dpu_crtc = to_dpu_crtc(crtc);
-       return atomic_read(&dpu_crtc->frame_pending);
+       return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
 }
 
 /**
@@ -282,11 +242,18 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
  */
 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
 
+/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
 /**
  * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
  * @crtc: Pointer to drm crtc object
+ * @async: true if the commit is asynchronous, false otherwise
  */
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
 
 /**
  * dpu_crtc_complete_commit - callback signalling completion of current commit
@@ -329,22 +296,7 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
 static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
                                                struct drm_crtc *crtc)
 {
-       struct dpu_crtc_state *cstate =
-                       crtc ? to_dpu_crtc_state(crtc->state) : NULL;
-
-       if (!cstate)
-               return NRT_CLIENT;
-
-       return RT_CLIENT;
-}
-
-/**
- * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
-{
-       return crtc ? crtc->enabled : false;
+       return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
 }
 
 #endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
deleted file mode 100644 (file)
index ae2aee7..0000000
+++ /dev/null
@@ -1,2393 +0,0 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-#include <linux/pm_runtime.h>
-
-#include "dpu_dbg.h"
-#include "disp/dpu1/dpu_hw_catalog.h"
-
-
-#define DEFAULT_DBGBUS_DPU     DPU_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT  DPU_DBG_DUMP_IN_MEM
-#define REG_BASE_NAME_LEN      80
-
-#define DBGBUS_FLAGS_DSPP      BIT(0)
-#define DBGBUS_DSPP_STATUS     0x34C
-
-#define DBGBUS_NAME_DPU                "dpu"
-#define DBGBUS_NAME_VBIF_RT    "vbif_rt"
-
-/* offsets from dpu top address for the debug buses */
-#define DBGBUS_SSPP0   0x188
-#define DBGBUS_AXI_INTF        0x194
-#define DBGBUS_SSPP1   0x298
-#define DBGBUS_DSPP    0x348
-#define DBGBUS_PERIPH  0x418
-
-#define TEST_MASK(id, tp)      ((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON                        0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL    0x210
-#define MMSS_VBIF_TEST_BUS_OUT         0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR              0x190
-#define MMSS_VBIF_SRC_ERR              0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1       0x204
-#define MMSS_VBIF_ERR_INFO             0X1a0
-#define MMSS_VBIF_ERR_INFO_1           0x1a4
-#define MMSS_VBIF_CLIENT_NUM           14
-
-/**
- * struct dpu_dbg_reg_base - register region base.
- *     may sub-ranges: sub-ranges are used for dumping
- *     or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len:  buffer length used for manual register dumping
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct dpu_dbg_reg_base {
-       struct list_head reg_base_head;
-       char name[REG_BASE_NAME_LEN];
-       void __iomem *base;
-       size_t off;
-       size_t cnt;
-       size_t max_offset;
-       char *buf;
-       size_t buf_len;
-       void (*cb)(void *ptr);
-       void *cb_ptr;
-};
-
-struct dpu_debug_bus_entry {
-       u32 wr_addr;
-       u32 block_id;
-       u32 test_id;
-       void (*analyzer)(void __iomem *mem_base,
-                               struct dpu_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
-       u32 disable_bus_addr;
-       u32 block_bus_addr;
-       u32 bit_offset;
-       u32 block_cnt;
-       u32 test_pnt_start;
-       u32 test_pnt_cnt;
-};
-
-struct dpu_dbg_debug_bus_common {
-       char *name;
-       u32 enable_mask;
-       bool include_in_deferred_work;
-       u32 flags;
-       u32 entries_size;
-       u32 *dumped_content;
-};
-
-struct dpu_dbg_dpu_debug_bus {
-       struct dpu_dbg_debug_bus_common cmn;
-       struct dpu_debug_bus_entry *entries;
-       u32 top_blk_off;
-};
-
-struct dpu_dbg_vbif_debug_bus {
-       struct dpu_dbg_debug_bus_common cmn;
-       struct vbif_debug_bus_entry *entries;
-};
-
-/**
- * struct dpu_dbg_base - global dpu debug base structure
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @dump_work: work struct for deferring register dump work to separate thread
- * @dbgbus_dpu: debug bus structure for the dpu
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- */
-static struct dpu_dbg_base {
-       struct list_head reg_base_list;
-       struct device *dev;
-
-       struct work_struct dump_work;
-
-       struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
-       struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
-} dpu_dbg_base;
-
-static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
-               struct dpu_debug_bus_entry *entry, u32 val)
-{
-       dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
-                       entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
-               struct dpu_debug_bus_entry *entry, u32 val)
-{
-       if (!(val & 0xFFF000))
-               return;
-
-       dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
-                       entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
-               struct dpu_debug_bus_entry *entry, u32 val)
-{
-       if (!(val & BIT(15)))
-               return;
-
-       dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
-                       entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
-               struct dpu_debug_bus_entry *entry, u32 val)
-{
-       if (!(val & BIT(15)))
-               return;
-
-       dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
-                       entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
-
-       /* Unpack 0 sspp 0*/
-       { DBGBUS_SSPP0, 50, 2 },
-       { DBGBUS_SSPP0, 60, 2 },
-       { DBGBUS_SSPP0, 70, 2 },
-       { DBGBUS_SSPP0, 85, 2 },
-
-       /* Upack 0 sspp 1*/
-       { DBGBUS_SSPP1, 50, 2 },
-       { DBGBUS_SSPP1, 60, 2 },
-       { DBGBUS_SSPP1, 70, 2 },
-       { DBGBUS_SSPP1, 85, 2 },
-
-       /* scheduler */
-       { DBGBUS_DSPP, 130, 0 },
-       { DBGBUS_DSPP, 130, 1 },
-       { DBGBUS_DSPP, 130, 2 },
-       { DBGBUS_DSPP, 130, 3 },
-       { DBGBUS_DSPP, 130, 4 },
-       { DBGBUS_DSPP, 130, 5 },
-
-       /* qseed */
-       { DBGBUS_SSPP0, 6, 0},
-       { DBGBUS_SSPP0, 6, 1},
-       { DBGBUS_SSPP0, 26, 0},
-       { DBGBUS_SSPP0, 26, 1},
-       { DBGBUS_SSPP1, 6, 0},
-       { DBGBUS_SSPP1, 6, 1},
-       { DBGBUS_SSPP1, 26, 0},
-       { DBGBUS_SSPP1, 26, 1},
-
-       /* scale */
-       { DBGBUS_SSPP0, 16, 0},
-       { DBGBUS_SSPP0, 16, 1},
-       { DBGBUS_SSPP0, 36, 0},
-       { DBGBUS_SSPP0, 36, 1},
-       { DBGBUS_SSPP1, 16, 0},
-       { DBGBUS_SSPP1, 16, 1},
-       { DBGBUS_SSPP1, 36, 0},
-       { DBGBUS_SSPP1, 36, 1},
-
-       /* fetch sspp0 */
-
-       /* vig 0 */
-       { DBGBUS_SSPP0, 0, 0 },
-       { DBGBUS_SSPP0, 0, 1 },
-       { DBGBUS_SSPP0, 0, 2 },
-       { DBGBUS_SSPP0, 0, 3 },
-       { DBGBUS_SSPP0, 0, 4 },
-       { DBGBUS_SSPP0, 0, 5 },
-       { DBGBUS_SSPP0, 0, 6 },
-       { DBGBUS_SSPP0, 0, 7 },
-
-       { DBGBUS_SSPP0, 1, 0 },
-       { DBGBUS_SSPP0, 1, 1 },
-       { DBGBUS_SSPP0, 1, 2 },
-       { DBGBUS_SSPP0, 1, 3 },
-       { DBGBUS_SSPP0, 1, 4 },
-       { DBGBUS_SSPP0, 1, 5 },
-       { DBGBUS_SSPP0, 1, 6 },
-       { DBGBUS_SSPP0, 1, 7 },
-
-       { DBGBUS_SSPP0, 2, 0 },
-       { DBGBUS_SSPP0, 2, 1 },
-       { DBGBUS_SSPP0, 2, 2 },
-       { DBGBUS_SSPP0, 2, 3 },
-       { DBGBUS_SSPP0, 2, 4 },
-       { DBGBUS_SSPP0, 2, 5 },
-       { DBGBUS_SSPP0, 2, 6 },
-       { DBGBUS_SSPP0, 2, 7 },
-
-       { DBGBUS_SSPP0, 4, 0 },
-       { DBGBUS_SSPP0, 4, 1 },
-       { DBGBUS_SSPP0, 4, 2 },
-       { DBGBUS_SSPP0, 4, 3 },
-       { DBGBUS_SSPP0, 4, 4 },
-       { DBGBUS_SSPP0, 4, 5 },
-       { DBGBUS_SSPP0, 4, 6 },
-       { DBGBUS_SSPP0, 4, 7 },
-
-       { DBGBUS_SSPP0, 5, 0 },
-       { DBGBUS_SSPP0, 5, 1 },
-       { DBGBUS_SSPP0, 5, 2 },
-       { DBGBUS_SSPP0, 5, 3 },
-       { DBGBUS_SSPP0, 5, 4 },
-       { DBGBUS_SSPP0, 5, 5 },
-       { DBGBUS_SSPP0, 5, 6 },
-       { DBGBUS_SSPP0, 5, 7 },
-
-       /* vig 2 */
-       { DBGBUS_SSPP0, 20, 0 },
-       { DBGBUS_SSPP0, 20, 1 },
-       { DBGBUS_SSPP0, 20, 2 },
-       { DBGBUS_SSPP0, 20, 3 },
-       { DBGBUS_SSPP0, 20, 4 },
-       { DBGBUS_SSPP0, 20, 5 },
-       { DBGBUS_SSPP0, 20, 6 },
-       { DBGBUS_SSPP0, 20, 7 },
-
-       { DBGBUS_SSPP0, 21, 0 },
-       { DBGBUS_SSPP0, 21, 1 },
-       { DBGBUS_SSPP0, 21, 2 },
-       { DBGBUS_SSPP0, 21, 3 },
-       { DBGBUS_SSPP0, 21, 4 },
-       { DBGBUS_SSPP0, 21, 5 },
-       { DBGBUS_SSPP0, 21, 6 },
-       { DBGBUS_SSPP0, 21, 7 },
-
-       { DBGBUS_SSPP0, 22, 0 },
-       { DBGBUS_SSPP0, 22, 1 },
-       { DBGBUS_SSPP0, 22, 2 },
-       { DBGBUS_SSPP0, 22, 3 },
-       { DBGBUS_SSPP0, 22, 4 },
-       { DBGBUS_SSPP0, 22, 5 },
-       { DBGBUS_SSPP0, 22, 6 },
-       { DBGBUS_SSPP0, 22, 7 },
-
-       { DBGBUS_SSPP0, 24, 0 },
-       { DBGBUS_SSPP0, 24, 1 },
-       { DBGBUS_SSPP0, 24, 2 },
-       { DBGBUS_SSPP0, 24, 3 },
-       { DBGBUS_SSPP0, 24, 4 },
-       { DBGBUS_SSPP0, 24, 5 },
-       { DBGBUS_SSPP0, 24, 6 },
-       { DBGBUS_SSPP0, 24, 7 },
-
-       { DBGBUS_SSPP0, 25, 0 },
-       { DBGBUS_SSPP0, 25, 1 },
-       { DBGBUS_SSPP0, 25, 2 },
-       { DBGBUS_SSPP0, 25, 3 },
-       { DBGBUS_SSPP0, 25, 4 },
-       { DBGBUS_SSPP0, 25, 5 },
-       { DBGBUS_SSPP0, 25, 6 },
-       { DBGBUS_SSPP0, 25, 7 },
-
-       /* dma 2 */
-       { DBGBUS_SSPP0, 30, 0 },
-       { DBGBUS_SSPP0, 30, 1 },
-       { DBGBUS_SSPP0, 30, 2 },
-       { DBGBUS_SSPP0, 30, 3 },
-       { DBGBUS_SSPP0, 30, 4 },
-       { DBGBUS_SSPP0, 30, 5 },
-       { DBGBUS_SSPP0, 30, 6 },
-       { DBGBUS_SSPP0, 30, 7 },
-
-       { DBGBUS_SSPP0, 31, 0 },
-       { DBGBUS_SSPP0, 31, 1 },
-       { DBGBUS_SSPP0, 31, 2 },
-       { DBGBUS_SSPP0, 31, 3 },
-       { DBGBUS_SSPP0, 31, 4 },
-       { DBGBUS_SSPP0, 31, 5 },
-       { DBGBUS_SSPP0, 31, 6 },
-       { DBGBUS_SSPP0, 31, 7 },
-
-       { DBGBUS_SSPP0, 32, 0 },
-       { DBGBUS_SSPP0, 32, 1 },
-       { DBGBUS_SSPP0, 32, 2 },
-       { DBGBUS_SSPP0, 32, 3 },
-       { DBGBUS_SSPP0, 32, 4 },
-       { DBGBUS_SSPP0, 32, 5 },
-       { DBGBUS_SSPP0, 32, 6 },
-       { DBGBUS_SSPP0, 32, 7 },
-
-       { DBGBUS_SSPP0, 33, 0 },
-       { DBGBUS_SSPP0, 33, 1 },
-       { DBGBUS_SSPP0, 33, 2 },
-       { DBGBUS_SSPP0, 33, 3 },
-       { DBGBUS_SSPP0, 33, 4 },
-       { DBGBUS_SSPP0, 33, 5 },
-       { DBGBUS_SSPP0, 33, 6 },
-       { DBGBUS_SSPP0, 33, 7 },
-
-       { DBGBUS_SSPP0, 34, 0 },
-       { DBGBUS_SSPP0, 34, 1 },
-       { DBGBUS_SSPP0, 34, 2 },
-       { DBGBUS_SSPP0, 34, 3 },
-       { DBGBUS_SSPP0, 34, 4 },
-       { DBGBUS_SSPP0, 34, 5 },
-       { DBGBUS_SSPP0, 34, 6 },
-       { DBGBUS_SSPP0, 34, 7 },
-
-       { DBGBUS_SSPP0, 35, 0 },
-       { DBGBUS_SSPP0, 35, 1 },
-       { DBGBUS_SSPP0, 35, 2 },
-       { DBGBUS_SSPP0, 35, 3 },
-
-       /* dma 0 */
-       { DBGBUS_SSPP0, 40, 0 },
-       { DBGBUS_SSPP0, 40, 1 },
-       { DBGBUS_SSPP0, 40, 2 },
-       { DBGBUS_SSPP0, 40, 3 },
-       { DBGBUS_SSPP0, 40, 4 },
-       { DBGBUS_SSPP0, 40, 5 },
-       { DBGBUS_SSPP0, 40, 6 },
-       { DBGBUS_SSPP0, 40, 7 },
-
-       { DBGBUS_SSPP0, 41, 0 },
-       { DBGBUS_SSPP0, 41, 1 },
-       { DBGBUS_SSPP0, 41, 2 },
-       { DBGBUS_SSPP0, 41, 3 },
-       { DBGBUS_SSPP0, 41, 4 },
-       { DBGBUS_SSPP0, 41, 5 },
-       { DBGBUS_SSPP0, 41, 6 },
-       { DBGBUS_SSPP0, 41, 7 },
-
-       { DBGBUS_SSPP0, 42, 0 },
-       { DBGBUS_SSPP0, 42, 1 },
-       { DBGBUS_SSPP0, 42, 2 },
-       { DBGBUS_SSPP0, 42, 3 },
-       { DBGBUS_SSPP0, 42, 4 },
-       { DBGBUS_SSPP0, 42, 5 },
-       { DBGBUS_SSPP0, 42, 6 },
-       { DBGBUS_SSPP0, 42, 7 },
-
-       { DBGBUS_SSPP0, 44, 0 },
-       { DBGBUS_SSPP0, 44, 1 },
-       { DBGBUS_SSPP0, 44, 2 },
-       { DBGBUS_SSPP0, 44, 3 },
-       { DBGBUS_SSPP0, 44, 4 },
-       { DBGBUS_SSPP0, 44, 5 },
-       { DBGBUS_SSPP0, 44, 6 },
-       { DBGBUS_SSPP0, 44, 7 },
-
-       { DBGBUS_SSPP0, 45, 0 },
-       { DBGBUS_SSPP0, 45, 1 },
-       { DBGBUS_SSPP0, 45, 2 },
-       { DBGBUS_SSPP0, 45, 3 },
-       { DBGBUS_SSPP0, 45, 4 },
-       { DBGBUS_SSPP0, 45, 5 },
-       { DBGBUS_SSPP0, 45, 6 },
-       { DBGBUS_SSPP0, 45, 7 },
-
-       /* fetch sspp1 */
-       /* vig 1 */
-       { DBGBUS_SSPP1, 0, 0 },
-       { DBGBUS_SSPP1, 0, 1 },
-       { DBGBUS_SSPP1, 0, 2 },
-       { DBGBUS_SSPP1, 0, 3 },
-       { DBGBUS_SSPP1, 0, 4 },
-       { DBGBUS_SSPP1, 0, 5 },
-       { DBGBUS_SSPP1, 0, 6 },
-       { DBGBUS_SSPP1, 0, 7 },
-
-       { DBGBUS_SSPP1, 1, 0 },
-       { DBGBUS_SSPP1, 1, 1 },
-       { DBGBUS_SSPP1, 1, 2 },
-       { DBGBUS_SSPP1, 1, 3 },
-       { DBGBUS_SSPP1, 1, 4 },
-       { DBGBUS_SSPP1, 1, 5 },
-       { DBGBUS_SSPP1, 1, 6 },
-       { DBGBUS_SSPP1, 1, 7 },
-
-       { DBGBUS_SSPP1, 2, 0 },
-       { DBGBUS_SSPP1, 2, 1 },
-       { DBGBUS_SSPP1, 2, 2 },
-       { DBGBUS_SSPP1, 2, 3 },
-       { DBGBUS_SSPP1, 2, 4 },
-       { DBGBUS_SSPP1, 2, 5 },
-       { DBGBUS_SSPP1, 2, 6 },
-       { DBGBUS_SSPP1, 2, 7 },
-
-       { DBGBUS_SSPP1, 4, 0 },
-       { DBGBUS_SSPP1, 4, 1 },
-       { DBGBUS_SSPP1, 4, 2 },
-       { DBGBUS_SSPP1, 4, 3 },
-       { DBGBUS_SSPP1, 4, 4 },
-       { DBGBUS_SSPP1, 4, 5 },
-       { DBGBUS_SSPP1, 4, 6 },
-       { DBGBUS_SSPP1, 4, 7 },
-
-       { DBGBUS_SSPP1, 5, 0 },
-       { DBGBUS_SSPP1, 5, 1 },
-       { DBGBUS_SSPP1, 5, 2 },
-       { DBGBUS_SSPP1, 5, 3 },
-       { DBGBUS_SSPP1, 5, 4 },
-       { DBGBUS_SSPP1, 5, 5 },
-       { DBGBUS_SSPP1, 5, 6 },
-       { DBGBUS_SSPP1, 5, 7 },
-
-       /* vig 3 */
-       { DBGBUS_SSPP1, 20, 0 },
-       { DBGBUS_SSPP1, 20, 1 },
-       { DBGBUS_SSPP1, 20, 2 },
-       { DBGBUS_SSPP1, 20, 3 },
-       { DBGBUS_SSPP1, 20, 4 },
-       { DBGBUS_SSPP1, 20, 5 },
-       { DBGBUS_SSPP1, 20, 6 },
-       { DBGBUS_SSPP1, 20, 7 },
-
-       { DBGBUS_SSPP1, 21, 0 },
-       { DBGBUS_SSPP1, 21, 1 },
-       { DBGBUS_SSPP1, 21, 2 },
-       { DBGBUS_SSPP1, 21, 3 },
-       { DBGBUS_SSPP1, 21, 4 },
-       { DBGBUS_SSPP1, 21, 5 },
-       { DBGBUS_SSPP1, 21, 6 },
-       { DBGBUS_SSPP1, 21, 7 },
-
-       { DBGBUS_SSPP1, 22, 0 },
-       { DBGBUS_SSPP1, 22, 1 },
-       { DBGBUS_SSPP1, 22, 2 },
-       { DBGBUS_SSPP1, 22, 3 },
-       { DBGBUS_SSPP1, 22, 4 },
-       { DBGBUS_SSPP1, 22, 5 },
-       { DBGBUS_SSPP1, 22, 6 },
-       { DBGBUS_SSPP1, 22, 7 },
-
-       { DBGBUS_SSPP1, 24, 0 },
-       { DBGBUS_SSPP1, 24, 1 },
-       { DBGBUS_SSPP1, 24, 2 },
-       { DBGBUS_SSPP1, 24, 3 },
-       { DBGBUS_SSPP1, 24, 4 },
-       { DBGBUS_SSPP1, 24, 5 },
-       { DBGBUS_SSPP1, 24, 6 },
-       { DBGBUS_SSPP1, 24, 7 },
-
-       { DBGBUS_SSPP1, 25, 0 },
-       { DBGBUS_SSPP1, 25, 1 },
-       { DBGBUS_SSPP1, 25, 2 },
-       { DBGBUS_SSPP1, 25, 3 },
-       { DBGBUS_SSPP1, 25, 4 },
-       { DBGBUS_SSPP1, 25, 5 },
-       { DBGBUS_SSPP1, 25, 6 },
-       { DBGBUS_SSPP1, 25, 7 },
-
-       /* dma 3 */
-       { DBGBUS_SSPP1, 30, 0 },
-       { DBGBUS_SSPP1, 30, 1 },
-       { DBGBUS_SSPP1, 30, 2 },
-       { DBGBUS_SSPP1, 30, 3 },
-       { DBGBUS_SSPP1, 30, 4 },
-       { DBGBUS_SSPP1, 30, 5 },
-       { DBGBUS_SSPP1, 30, 6 },
-       { DBGBUS_SSPP1, 30, 7 },
-
-       { DBGBUS_SSPP1, 31, 0 },
-       { DBGBUS_SSPP1, 31, 1 },
-       { DBGBUS_SSPP1, 31, 2 },
-       { DBGBUS_SSPP1, 31, 3 },
-       { DBGBUS_SSPP1, 31, 4 },
-       { DBGBUS_SSPP1, 31, 5 },
-       { DBGBUS_SSPP1, 31, 6 },
-       { DBGBUS_SSPP1, 31, 7 },
-
-       { DBGBUS_SSPP1, 32, 0 },
-       { DBGBUS_SSPP1, 32, 1 },
-       { DBGBUS_SSPP1, 32, 2 },
-       { DBGBUS_SSPP1, 32, 3 },
-       { DBGBUS_SSPP1, 32, 4 },
-       { DBGBUS_SSPP1, 32, 5 },
-       { DBGBUS_SSPP1, 32, 6 },
-       { DBGBUS_SSPP1, 32, 7 },
-
-       { DBGBUS_SSPP1, 33, 0 },
-       { DBGBUS_SSPP1, 33, 1 },
-       { DBGBUS_SSPP1, 33, 2 },
-       { DBGBUS_SSPP1, 33, 3 },
-       { DBGBUS_SSPP1, 33, 4 },
-       { DBGBUS_SSPP1, 33, 5 },
-       { DBGBUS_SSPP1, 33, 6 },
-       { DBGBUS_SSPP1, 33, 7 },
-
-       { DBGBUS_SSPP1, 34, 0 },
-       { DBGBUS_SSPP1, 34, 1 },
-       { DBGBUS_SSPP1, 34, 2 },
-       { DBGBUS_SSPP1, 34, 3 },
-       { DBGBUS_SSPP1, 34, 4 },
-       { DBGBUS_SSPP1, 34, 5 },
-       { DBGBUS_SSPP1, 34, 6 },
-       { DBGBUS_SSPP1, 34, 7 },
-
-       { DBGBUS_SSPP1, 35, 0 },
-       { DBGBUS_SSPP1, 35, 1 },
-       { DBGBUS_SSPP1, 35, 2 },
-
-       /* dma 1 */
-       { DBGBUS_SSPP1, 40, 0 },
-       { DBGBUS_SSPP1, 40, 1 },
-       { DBGBUS_SSPP1, 40, 2 },
-       { DBGBUS_SSPP1, 40, 3 },
-       { DBGBUS_SSPP1, 40, 4 },
-       { DBGBUS_SSPP1, 40, 5 },
-       { DBGBUS_SSPP1, 40, 6 },
-       { DBGBUS_SSPP1, 40, 7 },
-
-       { DBGBUS_SSPP1, 41, 0 },
-       { DBGBUS_SSPP1, 41, 1 },
-       { DBGBUS_SSPP1, 41, 2 },
-       { DBGBUS_SSPP1, 41, 3 },
-       { DBGBUS_SSPP1, 41, 4 },
-       { DBGBUS_SSPP1, 41, 5 },
-       { DBGBUS_SSPP1, 41, 6 },
-       { DBGBUS_SSPP1, 41, 7 },
-
-       { DBGBUS_SSPP1, 42, 0 },
-       { DBGBUS_SSPP1, 42, 1 },
-       { DBGBUS_SSPP1, 42, 2 },
-       { DBGBUS_SSPP1, 42, 3 },
-       { DBGBUS_SSPP1, 42, 4 },
-       { DBGBUS_SSPP1, 42, 5 },
-       { DBGBUS_SSPP1, 42, 6 },
-       { DBGBUS_SSPP1, 42, 7 },
-
-       { DBGBUS_SSPP1, 44, 0 },
-       { DBGBUS_SSPP1, 44, 1 },
-       { DBGBUS_SSPP1, 44, 2 },
-       { DBGBUS_SSPP1, 44, 3 },
-       { DBGBUS_SSPP1, 44, 4 },
-       { DBGBUS_SSPP1, 44, 5 },
-       { DBGBUS_SSPP1, 44, 6 },
-       { DBGBUS_SSPP1, 44, 7 },
-
-       { DBGBUS_SSPP1, 45, 0 },
-       { DBGBUS_SSPP1, 45, 1 },
-       { DBGBUS_SSPP1, 45, 2 },
-       { DBGBUS_SSPP1, 45, 3 },
-       { DBGBUS_SSPP1, 45, 4 },
-       { DBGBUS_SSPP1, 45, 5 },
-       { DBGBUS_SSPP1, 45, 6 },
-       { DBGBUS_SSPP1, 45, 7 },
-
-       /* cursor 1 */
-       { DBGBUS_SSPP1, 80, 0 },
-       { DBGBUS_SSPP1, 80, 1 },
-       { DBGBUS_SSPP1, 80, 2 },
-       { DBGBUS_SSPP1, 80, 3 },
-       { DBGBUS_SSPP1, 80, 4 },
-       { DBGBUS_SSPP1, 80, 5 },
-       { DBGBUS_SSPP1, 80, 6 },
-       { DBGBUS_SSPP1, 80, 7 },
-
-       { DBGBUS_SSPP1, 81, 0 },
-       { DBGBUS_SSPP1, 81, 1 },
-       { DBGBUS_SSPP1, 81, 2 },
-       { DBGBUS_SSPP1, 81, 3 },
-       { DBGBUS_SSPP1, 81, 4 },
-       { DBGBUS_SSPP1, 81, 5 },
-       { DBGBUS_SSPP1, 81, 6 },
-       { DBGBUS_SSPP1, 81, 7 },
-
-       { DBGBUS_SSPP1, 82, 0 },
-       { DBGBUS_SSPP1, 82, 1 },
-       { DBGBUS_SSPP1, 82, 2 },
-       { DBGBUS_SSPP1, 82, 3 },
-       { DBGBUS_SSPP1, 82, 4 },
-       { DBGBUS_SSPP1, 82, 5 },
-       { DBGBUS_SSPP1, 82, 6 },
-       { DBGBUS_SSPP1, 82, 7 },
-
-       { DBGBUS_SSPP1, 83, 0 },
-       { DBGBUS_SSPP1, 83, 1 },
-       { DBGBUS_SSPP1, 83, 2 },
-       { DBGBUS_SSPP1, 83, 3 },
-       { DBGBUS_SSPP1, 83, 4 },
-       { DBGBUS_SSPP1, 83, 5 },
-       { DBGBUS_SSPP1, 83, 6 },
-       { DBGBUS_SSPP1, 83, 7 },
-
-       { DBGBUS_SSPP1, 84, 0 },
-       { DBGBUS_SSPP1, 84, 1 },
-       { DBGBUS_SSPP1, 84, 2 },
-       { DBGBUS_SSPP1, 84, 3 },
-       { DBGBUS_SSPP1, 84, 4 },
-       { DBGBUS_SSPP1, 84, 5 },
-       { DBGBUS_SSPP1, 84, 6 },
-       { DBGBUS_SSPP1, 84, 7 },
-
-       /* dspp */
-       { DBGBUS_DSPP, 13, 0 },
-       { DBGBUS_DSPP, 19, 0 },
-       { DBGBUS_DSPP, 14, 0 },
-       { DBGBUS_DSPP, 14, 1 },
-       { DBGBUS_DSPP, 14, 3 },
-       { DBGBUS_DSPP, 20, 0 },
-       { DBGBUS_DSPP, 20, 1 },
-       { DBGBUS_DSPP, 20, 3 },
-
-       /* ppb_0 */
-       { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
-       { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
-       { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
-       { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
-       /* ppb_1 */
-       { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
-       { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
-       { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
-       { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
-       /* lm_lut */
-       { DBGBUS_DSPP, 109, 0 },
-       { DBGBUS_DSPP, 105, 0 },
-       { DBGBUS_DSPP, 103, 0 },
-
-       /* tear-check */
-       { DBGBUS_PERIPH, 63, 0 },
-       { DBGBUS_PERIPH, 64, 0 },
-       { DBGBUS_PERIPH, 65, 0 },
-       { DBGBUS_PERIPH, 73, 0 },
-       { DBGBUS_PERIPH, 74, 0 },
-
-       /* crossbar */
-       { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
-       /* rotator */
-       { DBGBUS_DSPP, 9, 0},
-
-       /* blend */
-       /* LM0 */
-       { DBGBUS_DSPP, 63, 0},
-       { DBGBUS_DSPP, 63, 1},
-       { DBGBUS_DSPP, 63, 2},
-       { DBGBUS_DSPP, 63, 3},
-       { DBGBUS_DSPP, 63, 4},
-       { DBGBUS_DSPP, 63, 5},
-       { DBGBUS_DSPP, 63, 6},
-       { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 64, 0},
-       { DBGBUS_DSPP, 64, 1},
-       { DBGBUS_DSPP, 64, 2},
-       { DBGBUS_DSPP, 64, 3},
-       { DBGBUS_DSPP, 64, 4},
-       { DBGBUS_DSPP, 64, 5},
-       { DBGBUS_DSPP, 64, 6},
-       { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 65, 0},
-       { DBGBUS_DSPP, 65, 1},
-       { DBGBUS_DSPP, 65, 2},
-       { DBGBUS_DSPP, 65, 3},
-       { DBGBUS_DSPP, 65, 4},
-       { DBGBUS_DSPP, 65, 5},
-       { DBGBUS_DSPP, 65, 6},
-       { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 66, 0},
-       { DBGBUS_DSPP, 66, 1},
-       { DBGBUS_DSPP, 66, 2},
-       { DBGBUS_DSPP, 66, 3},
-       { DBGBUS_DSPP, 66, 4},
-       { DBGBUS_DSPP, 66, 5},
-       { DBGBUS_DSPP, 66, 6},
-       { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 67, 0},
-       { DBGBUS_DSPP, 67, 1},
-       { DBGBUS_DSPP, 67, 2},
-       { DBGBUS_DSPP, 67, 3},
-       { DBGBUS_DSPP, 67, 4},
-       { DBGBUS_DSPP, 67, 5},
-       { DBGBUS_DSPP, 67, 6},
-       { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 68, 0},
-       { DBGBUS_DSPP, 68, 1},
-       { DBGBUS_DSPP, 68, 2},
-       { DBGBUS_DSPP, 68, 3},
-       { DBGBUS_DSPP, 68, 4},
-       { DBGBUS_DSPP, 68, 5},
-       { DBGBUS_DSPP, 68, 6},
-       { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 69, 0},
-       { DBGBUS_DSPP, 69, 1},
-       { DBGBUS_DSPP, 69, 2},
-       { DBGBUS_DSPP, 69, 3},
-       { DBGBUS_DSPP, 69, 4},
-       { DBGBUS_DSPP, 69, 5},
-       { DBGBUS_DSPP, 69, 6},
-       { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
-       /* LM1 */
-       { DBGBUS_DSPP, 70, 0},
-       { DBGBUS_DSPP, 70, 1},
-       { DBGBUS_DSPP, 70, 2},
-       { DBGBUS_DSPP, 70, 3},
-       { DBGBUS_DSPP, 70, 4},
-       { DBGBUS_DSPP, 70, 5},
-       { DBGBUS_DSPP, 70, 6},
-       { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 71, 0},
-       { DBGBUS_DSPP, 71, 1},
-       { DBGBUS_DSPP, 71, 2},
-       { DBGBUS_DSPP, 71, 3},
-       { DBGBUS_DSPP, 71, 4},
-       { DBGBUS_DSPP, 71, 5},
-       { DBGBUS_DSPP, 71, 6},
-       { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 72, 0},
-       { DBGBUS_DSPP, 72, 1},
-       { DBGBUS_DSPP, 72, 2},
-       { DBGBUS_DSPP, 72, 3},
-       { DBGBUS_DSPP, 72, 4},
-       { DBGBUS_DSPP, 72, 5},
-       { DBGBUS_DSPP, 72, 6},
-       { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 73, 0},
-       { DBGBUS_DSPP, 73, 1},
-       { DBGBUS_DSPP, 73, 2},
-       { DBGBUS_DSPP, 73, 3},
-       { DBGBUS_DSPP, 73, 4},
-       { DBGBUS_DSPP, 73, 5},
-       { DBGBUS_DSPP, 73, 6},
-       { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 74, 0},
-       { DBGBUS_DSPP, 74, 1},
-       { DBGBUS_DSPP, 74, 2},
-       { DBGBUS_DSPP, 74, 3},
-       { DBGBUS_DSPP, 74, 4},
-       { DBGBUS_DSPP, 74, 5},
-       { DBGBUS_DSPP, 74, 6},
-       { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 75, 0},
-       { DBGBUS_DSPP, 75, 1},
-       { DBGBUS_DSPP, 75, 2},
-       { DBGBUS_DSPP, 75, 3},
-       { DBGBUS_DSPP, 75, 4},
-       { DBGBUS_DSPP, 75, 5},
-       { DBGBUS_DSPP, 75, 6},
-       { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 76, 0},
-       { DBGBUS_DSPP, 76, 1},
-       { DBGBUS_DSPP, 76, 2},
-       { DBGBUS_DSPP, 76, 3},
-       { DBGBUS_DSPP, 76, 4},
-       { DBGBUS_DSPP, 76, 5},
-       { DBGBUS_DSPP, 76, 6},
-       { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
-       /* LM2 */
-       { DBGBUS_DSPP, 77, 0},
-       { DBGBUS_DSPP, 77, 1},
-       { DBGBUS_DSPP, 77, 2},
-       { DBGBUS_DSPP, 77, 3},
-       { DBGBUS_DSPP, 77, 4},
-       { DBGBUS_DSPP, 77, 5},
-       { DBGBUS_DSPP, 77, 6},
-       { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 78, 0},
-       { DBGBUS_DSPP, 78, 1},
-       { DBGBUS_DSPP, 78, 2},
-       { DBGBUS_DSPP, 78, 3},
-       { DBGBUS_DSPP, 78, 4},
-       { DBGBUS_DSPP, 78, 5},
-       { DBGBUS_DSPP, 78, 6},
-       { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 79, 0},
-       { DBGBUS_DSPP, 79, 1},
-       { DBGBUS_DSPP, 79, 2},
-       { DBGBUS_DSPP, 79, 3},
-       { DBGBUS_DSPP, 79, 4},
-       { DBGBUS_DSPP, 79, 5},
-       { DBGBUS_DSPP, 79, 6},
-       { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 80, 0},
-       { DBGBUS_DSPP, 80, 1},
-       { DBGBUS_DSPP, 80, 2},
-       { DBGBUS_DSPP, 80, 3},
-       { DBGBUS_DSPP, 80, 4},
-       { DBGBUS_DSPP, 80, 5},
-       { DBGBUS_DSPP, 80, 6},
-       { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 81, 0},
-       { DBGBUS_DSPP, 81, 1},
-       { DBGBUS_DSPP, 81, 2},
-       { DBGBUS_DSPP, 81, 3},
-       { DBGBUS_DSPP, 81, 4},
-       { DBGBUS_DSPP, 81, 5},
-       { DBGBUS_DSPP, 81, 6},
-       { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 82, 0},
-       { DBGBUS_DSPP, 82, 1},
-       { DBGBUS_DSPP, 82, 2},
-       { DBGBUS_DSPP, 82, 3},
-       { DBGBUS_DSPP, 82, 4},
-       { DBGBUS_DSPP, 82, 5},
-       { DBGBUS_DSPP, 82, 6},
-       { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 83, 0},
-       { DBGBUS_DSPP, 83, 1},
-       { DBGBUS_DSPP, 83, 2},
-       { DBGBUS_DSPP, 83, 3},
-       { DBGBUS_DSPP, 83, 4},
-       { DBGBUS_DSPP, 83, 5},
-       { DBGBUS_DSPP, 83, 6},
-       { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
-       /* csc */
-       { DBGBUS_SSPP0, 7, 0},
-       { DBGBUS_SSPP0, 7, 1},
-       { DBGBUS_SSPP0, 27, 0},
-       { DBGBUS_SSPP0, 27, 1},
-       { DBGBUS_SSPP1, 7, 0},
-       { DBGBUS_SSPP1, 7, 1},
-       { DBGBUS_SSPP1, 27, 0},
-       { DBGBUS_SSPP1, 27, 1},
-
-       /* pcc */
-       { DBGBUS_SSPP0, 3,  3},
-       { DBGBUS_SSPP0, 23, 3},
-       { DBGBUS_SSPP0, 33, 3},
-       { DBGBUS_SSPP0, 43, 3},
-       { DBGBUS_SSPP1, 3,  3},
-       { DBGBUS_SSPP1, 23, 3},
-       { DBGBUS_SSPP1, 33, 3},
-       { DBGBUS_SSPP1, 43, 3},
-
-       /* spa */
-       { DBGBUS_SSPP0, 8,  0},
-       { DBGBUS_SSPP0, 28, 0},
-       { DBGBUS_SSPP1, 8,  0},
-       { DBGBUS_SSPP1, 28, 0},
-       { DBGBUS_DSPP, 13, 0},
-       { DBGBUS_DSPP, 19, 0},
-
-       /* igc */
-       { DBGBUS_SSPP0, 9,  0},
-       { DBGBUS_SSPP0, 9,  1},
-       { DBGBUS_SSPP0, 9,  3},
-       { DBGBUS_SSPP0, 29, 0},
-       { DBGBUS_SSPP0, 29, 1},
-       { DBGBUS_SSPP0, 29, 3},
-       { DBGBUS_SSPP0, 17, 0},
-       { DBGBUS_SSPP0, 17, 1},
-       { DBGBUS_SSPP0, 17, 3},
-       { DBGBUS_SSPP0, 37, 0},
-       { DBGBUS_SSPP0, 37, 1},
-       { DBGBUS_SSPP0, 37, 3},
-       { DBGBUS_SSPP0, 46, 0},
-       { DBGBUS_SSPP0, 46, 1},
-       { DBGBUS_SSPP0, 46, 3},
-
-       { DBGBUS_SSPP1, 9,  0},
-       { DBGBUS_SSPP1, 9,  1},
-       { DBGBUS_SSPP1, 9,  3},
-       { DBGBUS_SSPP1, 29, 0},
-       { DBGBUS_SSPP1, 29, 1},
-       { DBGBUS_SSPP1, 29, 3},
-       { DBGBUS_SSPP1, 17, 0},
-       { DBGBUS_SSPP1, 17, 1},
-       { DBGBUS_SSPP1, 17, 3},
-       { DBGBUS_SSPP1, 37, 0},
-       { DBGBUS_SSPP1, 37, 1},
-       { DBGBUS_SSPP1, 37, 3},
-       { DBGBUS_SSPP1, 46, 0},
-       { DBGBUS_SSPP1, 46, 1},
-       { DBGBUS_SSPP1, 46, 3},
-
-       { DBGBUS_DSPP, 14, 0},
-       { DBGBUS_DSPP, 14, 1},
-       { DBGBUS_DSPP, 14, 3},
-       { DBGBUS_DSPP, 20, 0},
-       { DBGBUS_DSPP, 20, 1},
-       { DBGBUS_DSPP, 20, 3},
-
-       { DBGBUS_PERIPH, 60, 0},
-};
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
-
-       /* Unpack 0 sspp 0*/
-       { DBGBUS_SSPP0, 50, 2 },
-       { DBGBUS_SSPP0, 60, 2 },
-       { DBGBUS_SSPP0, 70, 2 },
-
-       /* Upack 0 sspp 1*/
-       { DBGBUS_SSPP1, 50, 2 },
-       { DBGBUS_SSPP1, 60, 2 },
-       { DBGBUS_SSPP1, 70, 2 },
-
-       /* scheduler */
-       { DBGBUS_DSPP, 130, 0 },
-       { DBGBUS_DSPP, 130, 1 },
-       { DBGBUS_DSPP, 130, 2 },
-       { DBGBUS_DSPP, 130, 3 },
-       { DBGBUS_DSPP, 130, 4 },
-       { DBGBUS_DSPP, 130, 5 },
-
-       /* qseed */
-       { DBGBUS_SSPP0, 6, 0},
-       { DBGBUS_SSPP0, 6, 1},
-       { DBGBUS_SSPP0, 26, 0},
-       { DBGBUS_SSPP0, 26, 1},
-       { DBGBUS_SSPP1, 6, 0},
-       { DBGBUS_SSPP1, 6, 1},
-       { DBGBUS_SSPP1, 26, 0},
-       { DBGBUS_SSPP1, 26, 1},
-
-       /* scale */
-       { DBGBUS_SSPP0, 16, 0},
-       { DBGBUS_SSPP0, 16, 1},
-       { DBGBUS_SSPP0, 36, 0},
-       { DBGBUS_SSPP0, 36, 1},
-       { DBGBUS_SSPP1, 16, 0},
-       { DBGBUS_SSPP1, 16, 1},
-       { DBGBUS_SSPP1, 36, 0},
-       { DBGBUS_SSPP1, 36, 1},
-
-       /* fetch sspp0 */
-
-       /* vig 0 */
-       { DBGBUS_SSPP0, 0, 0 },
-       { DBGBUS_SSPP0, 0, 1 },
-       { DBGBUS_SSPP0, 0, 2 },
-       { DBGBUS_SSPP0, 0, 3 },
-       { DBGBUS_SSPP0, 0, 4 },
-       { DBGBUS_SSPP0, 0, 5 },
-       { DBGBUS_SSPP0, 0, 6 },
-       { DBGBUS_SSPP0, 0, 7 },
-
-       { DBGBUS_SSPP0, 1, 0 },
-       { DBGBUS_SSPP0, 1, 1 },
-       { DBGBUS_SSPP0, 1, 2 },
-       { DBGBUS_SSPP0, 1, 3 },
-       { DBGBUS_SSPP0, 1, 4 },
-       { DBGBUS_SSPP0, 1, 5 },
-       { DBGBUS_SSPP0, 1, 6 },
-       { DBGBUS_SSPP0, 1, 7 },
-
-       { DBGBUS_SSPP0, 2, 0 },
-       { DBGBUS_SSPP0, 2, 1 },
-       { DBGBUS_SSPP0, 2, 2 },
-       { DBGBUS_SSPP0, 2, 3 },
-       { DBGBUS_SSPP0, 2, 4 },
-       { DBGBUS_SSPP0, 2, 5 },
-       { DBGBUS_SSPP0, 2, 6 },
-       { DBGBUS_SSPP0, 2, 7 },
-
-       { DBGBUS_SSPP0, 4, 0 },
-       { DBGBUS_SSPP0, 4, 1 },
-       { DBGBUS_SSPP0, 4, 2 },
-       { DBGBUS_SSPP0, 4, 3 },
-       { DBGBUS_SSPP0, 4, 4 },
-       { DBGBUS_SSPP0, 4, 5 },
-       { DBGBUS_SSPP0, 4, 6 },
-       { DBGBUS_SSPP0, 4, 7 },
-
-       { DBGBUS_SSPP0, 5, 0 },
-       { DBGBUS_SSPP0, 5, 1 },
-       { DBGBUS_SSPP0, 5, 2 },
-       { DBGBUS_SSPP0, 5, 3 },
-       { DBGBUS_SSPP0, 5, 4 },
-       { DBGBUS_SSPP0, 5, 5 },
-       { DBGBUS_SSPP0, 5, 6 },
-       { DBGBUS_SSPP0, 5, 7 },
-
-       /* vig 2 */
-       { DBGBUS_SSPP0, 20, 0 },
-       { DBGBUS_SSPP0, 20, 1 },
-       { DBGBUS_SSPP0, 20, 2 },
-       { DBGBUS_SSPP0, 20, 3 },
-       { DBGBUS_SSPP0, 20, 4 },
-       { DBGBUS_SSPP0, 20, 5 },
-       { DBGBUS_SSPP0, 20, 6 },
-       { DBGBUS_SSPP0, 20, 7 },
-
-       { DBGBUS_SSPP0, 21, 0 },
-       { DBGBUS_SSPP0, 21, 1 },
-       { DBGBUS_SSPP0, 21, 2 },
-       { DBGBUS_SSPP0, 21, 3 },
-       { DBGBUS_SSPP0, 21, 4 },
-       { DBGBUS_SSPP0, 21, 5 },
-       { DBGBUS_SSPP0, 21, 6 },
-       { DBGBUS_SSPP0, 21, 7 },
-
-       { DBGBUS_SSPP0, 22, 0 },
-       { DBGBUS_SSPP0, 22, 1 },
-       { DBGBUS_SSPP0, 22, 2 },
-       { DBGBUS_SSPP0, 22, 3 },
-       { DBGBUS_SSPP0, 22, 4 },
-       { DBGBUS_SSPP0, 22, 5 },
-       { DBGBUS_SSPP0, 22, 6 },
-       { DBGBUS_SSPP0, 22, 7 },
-
-       { DBGBUS_SSPP0, 24, 0 },
-       { DBGBUS_SSPP0, 24, 1 },
-       { DBGBUS_SSPP0, 24, 2 },
-       { DBGBUS_SSPP0, 24, 3 },
-       { DBGBUS_SSPP0, 24, 4 },
-       { DBGBUS_SSPP0, 24, 5 },
-       { DBGBUS_SSPP0, 24, 6 },
-       { DBGBUS_SSPP0, 24, 7 },
-
-       { DBGBUS_SSPP0, 25, 0 },
-       { DBGBUS_SSPP0, 25, 1 },
-       { DBGBUS_SSPP0, 25, 2 },
-       { DBGBUS_SSPP0, 25, 3 },
-       { DBGBUS_SSPP0, 25, 4 },
-       { DBGBUS_SSPP0, 25, 5 },
-       { DBGBUS_SSPP0, 25, 6 },
-       { DBGBUS_SSPP0, 25, 7 },
-
-       /* dma 2 */
-       { DBGBUS_SSPP0, 30, 0 },
-       { DBGBUS_SSPP0, 30, 1 },
-       { DBGBUS_SSPP0, 30, 2 },
-       { DBGBUS_SSPP0, 30, 3 },
-       { DBGBUS_SSPP0, 30, 4 },
-       { DBGBUS_SSPP0, 30, 5 },
-       { DBGBUS_SSPP0, 30, 6 },
-       { DBGBUS_SSPP0, 30, 7 },
-
-       { DBGBUS_SSPP0, 31, 0 },
-       { DBGBUS_SSPP0, 31, 1 },
-       { DBGBUS_SSPP0, 31, 2 },
-       { DBGBUS_SSPP0, 31, 3 },
-       { DBGBUS_SSPP0, 31, 4 },
-       { DBGBUS_SSPP0, 31, 5 },
-       { DBGBUS_SSPP0, 31, 6 },
-       { DBGBUS_SSPP0, 31, 7 },
-
-       { DBGBUS_SSPP0, 32, 0 },
-       { DBGBUS_SSPP0, 32, 1 },
-       { DBGBUS_SSPP0, 32, 2 },
-       { DBGBUS_SSPP0, 32, 3 },
-       { DBGBUS_SSPP0, 32, 4 },
-       { DBGBUS_SSPP0, 32, 5 },
-       { DBGBUS_SSPP0, 32, 6 },
-       { DBGBUS_SSPP0, 32, 7 },
-
-       { DBGBUS_SSPP0, 33, 0 },
-       { DBGBUS_SSPP0, 33, 1 },
-       { DBGBUS_SSPP0, 33, 2 },
-       { DBGBUS_SSPP0, 33, 3 },
-       { DBGBUS_SSPP0, 33, 4 },
-       { DBGBUS_SSPP0, 33, 5 },
-       { DBGBUS_SSPP0, 33, 6 },
-       { DBGBUS_SSPP0, 33, 7 },
-
-       { DBGBUS_SSPP0, 34, 0 },
-       { DBGBUS_SSPP0, 34, 1 },
-       { DBGBUS_SSPP0, 34, 2 },
-       { DBGBUS_SSPP0, 34, 3 },
-       { DBGBUS_SSPP0, 34, 4 },
-       { DBGBUS_SSPP0, 34, 5 },
-       { DBGBUS_SSPP0, 34, 6 },
-       { DBGBUS_SSPP0, 34, 7 },
-
-       { DBGBUS_SSPP0, 35, 0 },
-       { DBGBUS_SSPP0, 35, 1 },
-       { DBGBUS_SSPP0, 35, 2 },
-       { DBGBUS_SSPP0, 35, 3 },
-
-       /* dma 0 */
-       { DBGBUS_SSPP0, 40, 0 },
-       { DBGBUS_SSPP0, 40, 1 },
-       { DBGBUS_SSPP0, 40, 2 },
-       { DBGBUS_SSPP0, 40, 3 },
-       { DBGBUS_SSPP0, 40, 4 },
-       { DBGBUS_SSPP0, 40, 5 },
-       { DBGBUS_SSPP0, 40, 6 },
-       { DBGBUS_SSPP0, 40, 7 },
-
-       { DBGBUS_SSPP0, 41, 0 },
-       { DBGBUS_SSPP0, 41, 1 },
-       { DBGBUS_SSPP0, 41, 2 },
-       { DBGBUS_SSPP0, 41, 3 },
-       { DBGBUS_SSPP0, 41, 4 },
-       { DBGBUS_SSPP0, 41, 5 },
-       { DBGBUS_SSPP0, 41, 6 },
-       { DBGBUS_SSPP0, 41, 7 },
-
-       { DBGBUS_SSPP0, 42, 0 },
-       { DBGBUS_SSPP0, 42, 1 },
-       { DBGBUS_SSPP0, 42, 2 },
-       { DBGBUS_SSPP0, 42, 3 },
-       { DBGBUS_SSPP0, 42, 4 },
-       { DBGBUS_SSPP0, 42, 5 },
-       { DBGBUS_SSPP0, 42, 6 },
-       { DBGBUS_SSPP0, 42, 7 },
-
-       { DBGBUS_SSPP0, 44, 0 },
-       { DBGBUS_SSPP0, 44, 1 },
-       { DBGBUS_SSPP0, 44, 2 },
-       { DBGBUS_SSPP0, 44, 3 },
-       { DBGBUS_SSPP0, 44, 4 },
-       { DBGBUS_SSPP0, 44, 5 },
-       { DBGBUS_SSPP0, 44, 6 },
-       { DBGBUS_SSPP0, 44, 7 },
-
-       { DBGBUS_SSPP0, 45, 0 },
-       { DBGBUS_SSPP0, 45, 1 },
-       { DBGBUS_SSPP0, 45, 2 },
-       { DBGBUS_SSPP0, 45, 3 },
-       { DBGBUS_SSPP0, 45, 4 },
-       { DBGBUS_SSPP0, 45, 5 },
-       { DBGBUS_SSPP0, 45, 6 },
-       { DBGBUS_SSPP0, 45, 7 },
-
-       /* fetch sspp1 */
-       /* vig 1 */
-       { DBGBUS_SSPP1, 0, 0 },
-       { DBGBUS_SSPP1, 0, 1 },
-       { DBGBUS_SSPP1, 0, 2 },
-       { DBGBUS_SSPP1, 0, 3 },
-       { DBGBUS_SSPP1, 0, 4 },
-       { DBGBUS_SSPP1, 0, 5 },
-       { DBGBUS_SSPP1, 0, 6 },
-       { DBGBUS_SSPP1, 0, 7 },
-
-       { DBGBUS_SSPP1, 1, 0 },
-       { DBGBUS_SSPP1, 1, 1 },
-       { DBGBUS_SSPP1, 1, 2 },
-       { DBGBUS_SSPP1, 1, 3 },
-       { DBGBUS_SSPP1, 1, 4 },
-       { DBGBUS_SSPP1, 1, 5 },
-       { DBGBUS_SSPP1, 1, 6 },
-       { DBGBUS_SSPP1, 1, 7 },
-
-       { DBGBUS_SSPP1, 2, 0 },
-       { DBGBUS_SSPP1, 2, 1 },
-       { DBGBUS_SSPP1, 2, 2 },
-       { DBGBUS_SSPP1, 2, 3 },
-       { DBGBUS_SSPP1, 2, 4 },
-       { DBGBUS_SSPP1, 2, 5 },
-       { DBGBUS_SSPP1, 2, 6 },
-       { DBGBUS_SSPP1, 2, 7 },
-
-       { DBGBUS_SSPP1, 4, 0 },
-       { DBGBUS_SSPP1, 4, 1 },
-       { DBGBUS_SSPP1, 4, 2 },
-       { DBGBUS_SSPP1, 4, 3 },
-       { DBGBUS_SSPP1, 4, 4 },
-       { DBGBUS_SSPP1, 4, 5 },
-       { DBGBUS_SSPP1, 4, 6 },
-       { DBGBUS_SSPP1, 4, 7 },
-
-       { DBGBUS_SSPP1, 5, 0 },
-       { DBGBUS_SSPP1, 5, 1 },
-       { DBGBUS_SSPP1, 5, 2 },
-       { DBGBUS_SSPP1, 5, 3 },
-       { DBGBUS_SSPP1, 5, 4 },
-       { DBGBUS_SSPP1, 5, 5 },
-       { DBGBUS_SSPP1, 5, 6 },
-       { DBGBUS_SSPP1, 5, 7 },
-
-       /* vig 3 */
-       { DBGBUS_SSPP1, 20, 0 },
-       { DBGBUS_SSPP1, 20, 1 },
-       { DBGBUS_SSPP1, 20, 2 },
-       { DBGBUS_SSPP1, 20, 3 },
-       { DBGBUS_SSPP1, 20, 4 },
-       { DBGBUS_SSPP1, 20, 5 },
-       { DBGBUS_SSPP1, 20, 6 },
-       { DBGBUS_SSPP1, 20, 7 },
-
-       { DBGBUS_SSPP1, 21, 0 },
-       { DBGBUS_SSPP1, 21, 1 },
-       { DBGBUS_SSPP1, 21, 2 },
-       { DBGBUS_SSPP1, 21, 3 },
-       { DBGBUS_SSPP1, 21, 4 },
-       { DBGBUS_SSPP1, 21, 5 },
-       { DBGBUS_SSPP1, 21, 6 },
-       { DBGBUS_SSPP1, 21, 7 },
-
-       { DBGBUS_SSPP1, 22, 0 },
-       { DBGBUS_SSPP1, 22, 1 },
-       { DBGBUS_SSPP1, 22, 2 },
-       { DBGBUS_SSPP1, 22, 3 },
-       { DBGBUS_SSPP1, 22, 4 },
-       { DBGBUS_SSPP1, 22, 5 },
-       { DBGBUS_SSPP1, 22, 6 },
-       { DBGBUS_SSPP1, 22, 7 },
-
-       { DBGBUS_SSPP1, 24, 0 },
-       { DBGBUS_SSPP1, 24, 1 },
-       { DBGBUS_SSPP1, 24, 2 },
-       { DBGBUS_SSPP1, 24, 3 },
-       { DBGBUS_SSPP1, 24, 4 },
-       { DBGBUS_SSPP1, 24, 5 },
-       { DBGBUS_SSPP1, 24, 6 },
-       { DBGBUS_SSPP1, 24, 7 },
-
-       { DBGBUS_SSPP1, 25, 0 },
-       { DBGBUS_SSPP1, 25, 1 },
-       { DBGBUS_SSPP1, 25, 2 },
-       { DBGBUS_SSPP1, 25, 3 },
-       { DBGBUS_SSPP1, 25, 4 },
-       { DBGBUS_SSPP1, 25, 5 },
-       { DBGBUS_SSPP1, 25, 6 },
-       { DBGBUS_SSPP1, 25, 7 },
-
-       /* dma 3 */
-       { DBGBUS_SSPP1, 30, 0 },
-       { DBGBUS_SSPP1, 30, 1 },
-       { DBGBUS_SSPP1, 30, 2 },
-       { DBGBUS_SSPP1, 30, 3 },
-       { DBGBUS_SSPP1, 30, 4 },
-       { DBGBUS_SSPP1, 30, 5 },
-       { DBGBUS_SSPP1, 30, 6 },
-       { DBGBUS_SSPP1, 30, 7 },
-
-       { DBGBUS_SSPP1, 31, 0 },
-       { DBGBUS_SSPP1, 31, 1 },
-       { DBGBUS_SSPP1, 31, 2 },
-       { DBGBUS_SSPP1, 31, 3 },
-       { DBGBUS_SSPP1, 31, 4 },
-       { DBGBUS_SSPP1, 31, 5 },
-       { DBGBUS_SSPP1, 31, 6 },
-       { DBGBUS_SSPP1, 31, 7 },
-
-       { DBGBUS_SSPP1, 32, 0 },
-       { DBGBUS_SSPP1, 32, 1 },
-       { DBGBUS_SSPP1, 32, 2 },
-       { DBGBUS_SSPP1, 32, 3 },
-       { DBGBUS_SSPP1, 32, 4 },
-       { DBGBUS_SSPP1, 32, 5 },
-       { DBGBUS_SSPP1, 32, 6 },
-       { DBGBUS_SSPP1, 32, 7 },
-
-       { DBGBUS_SSPP1, 33, 0 },
-       { DBGBUS_SSPP1, 33, 1 },
-       { DBGBUS_SSPP1, 33, 2 },
-       { DBGBUS_SSPP1, 33, 3 },
-       { DBGBUS_SSPP1, 33, 4 },
-       { DBGBUS_SSPP1, 33, 5 },
-       { DBGBUS_SSPP1, 33, 6 },
-       { DBGBUS_SSPP1, 33, 7 },
-
-       { DBGBUS_SSPP1, 34, 0 },
-       { DBGBUS_SSPP1, 34, 1 },
-       { DBGBUS_SSPP1, 34, 2 },
-       { DBGBUS_SSPP1, 34, 3 },
-       { DBGBUS_SSPP1, 34, 4 },
-       { DBGBUS_SSPP1, 34, 5 },
-       { DBGBUS_SSPP1, 34, 6 },
-       { DBGBUS_SSPP1, 34, 7 },
-
-       { DBGBUS_SSPP1, 35, 0 },
-       { DBGBUS_SSPP1, 35, 1 },
-       { DBGBUS_SSPP1, 35, 2 },
-
-       /* dma 1 */
-       { DBGBUS_SSPP1, 40, 0 },
-       { DBGBUS_SSPP1, 40, 1 },
-       { DBGBUS_SSPP1, 40, 2 },
-       { DBGBUS_SSPP1, 40, 3 },
-       { DBGBUS_SSPP1, 40, 4 },
-       { DBGBUS_SSPP1, 40, 5 },
-       { DBGBUS_SSPP1, 40, 6 },
-       { DBGBUS_SSPP1, 40, 7 },
-
-       { DBGBUS_SSPP1, 41, 0 },
-       { DBGBUS_SSPP1, 41, 1 },
-       { DBGBUS_SSPP1, 41, 2 },
-       { DBGBUS_SSPP1, 41, 3 },
-       { DBGBUS_SSPP1, 41, 4 },
-       { DBGBUS_SSPP1, 41, 5 },
-       { DBGBUS_SSPP1, 41, 6 },
-       { DBGBUS_SSPP1, 41, 7 },
-
-       { DBGBUS_SSPP1, 42, 0 },
-       { DBGBUS_SSPP1, 42, 1 },
-       { DBGBUS_SSPP1, 42, 2 },
-       { DBGBUS_SSPP1, 42, 3 },
-       { DBGBUS_SSPP1, 42, 4 },
-       { DBGBUS_SSPP1, 42, 5 },
-       { DBGBUS_SSPP1, 42, 6 },
-       { DBGBUS_SSPP1, 42, 7 },
-
-       { DBGBUS_SSPP1, 44, 0 },
-       { DBGBUS_SSPP1, 44, 1 },
-       { DBGBUS_SSPP1, 44, 2 },
-       { DBGBUS_SSPP1, 44, 3 },
-       { DBGBUS_SSPP1, 44, 4 },
-       { DBGBUS_SSPP1, 44, 5 },
-       { DBGBUS_SSPP1, 44, 6 },
-       { DBGBUS_SSPP1, 44, 7 },
-
-       { DBGBUS_SSPP1, 45, 0 },
-       { DBGBUS_SSPP1, 45, 1 },
-       { DBGBUS_SSPP1, 45, 2 },
-       { DBGBUS_SSPP1, 45, 3 },
-       { DBGBUS_SSPP1, 45, 4 },
-       { DBGBUS_SSPP1, 45, 5 },
-       { DBGBUS_SSPP1, 45, 6 },
-       { DBGBUS_SSPP1, 45, 7 },
-
-       /* dspp */
-       { DBGBUS_DSPP, 13, 0 },
-       { DBGBUS_DSPP, 19, 0 },
-       { DBGBUS_DSPP, 14, 0 },
-       { DBGBUS_DSPP, 14, 1 },
-       { DBGBUS_DSPP, 14, 3 },
-       { DBGBUS_DSPP, 20, 0 },
-       { DBGBUS_DSPP, 20, 1 },
-       { DBGBUS_DSPP, 20, 3 },
-
-       /* ppb_0 */
-       { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
-       { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
-       { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
-       { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
-       /* ppb_1 */
-       { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
-       { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
-       { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
-       { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
-       /* lm_lut */
-       { DBGBUS_DSPP, 109, 0 },
-       { DBGBUS_DSPP, 105, 0 },
-       { DBGBUS_DSPP, 103, 0 },
-
-       /* crossbar */
-       { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
-       /* rotator */
-       { DBGBUS_DSPP, 9, 0},
-
-       /* blend */
-       /* LM0 */
-       { DBGBUS_DSPP, 63, 1},
-       { DBGBUS_DSPP, 63, 2},
-       { DBGBUS_DSPP, 63, 3},
-       { DBGBUS_DSPP, 63, 4},
-       { DBGBUS_DSPP, 63, 5},
-       { DBGBUS_DSPP, 63, 6},
-       { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 64, 1},
-       { DBGBUS_DSPP, 64, 2},
-       { DBGBUS_DSPP, 64, 3},
-       { DBGBUS_DSPP, 64, 4},
-       { DBGBUS_DSPP, 64, 5},
-       { DBGBUS_DSPP, 64, 6},
-       { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 65, 1},
-       { DBGBUS_DSPP, 65, 2},
-       { DBGBUS_DSPP, 65, 3},
-       { DBGBUS_DSPP, 65, 4},
-       { DBGBUS_DSPP, 65, 5},
-       { DBGBUS_DSPP, 65, 6},
-       { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 66, 1},
-       { DBGBUS_DSPP, 66, 2},
-       { DBGBUS_DSPP, 66, 3},
-       { DBGBUS_DSPP, 66, 4},
-       { DBGBUS_DSPP, 66, 5},
-       { DBGBUS_DSPP, 66, 6},
-       { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 67, 1},
-       { DBGBUS_DSPP, 67, 2},
-       { DBGBUS_DSPP, 67, 3},
-       { DBGBUS_DSPP, 67, 4},
-       { DBGBUS_DSPP, 67, 5},
-       { DBGBUS_DSPP, 67, 6},
-       { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 68, 1},
-       { DBGBUS_DSPP, 68, 2},
-       { DBGBUS_DSPP, 68, 3},
-       { DBGBUS_DSPP, 68, 4},
-       { DBGBUS_DSPP, 68, 5},
-       { DBGBUS_DSPP, 68, 6},
-       { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 69, 1},
-       { DBGBUS_DSPP, 69, 2},
-       { DBGBUS_DSPP, 69, 3},
-       { DBGBUS_DSPP, 69, 4},
-       { DBGBUS_DSPP, 69, 5},
-       { DBGBUS_DSPP, 69, 6},
-       { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 84, 1},
-       { DBGBUS_DSPP, 84, 2},
-       { DBGBUS_DSPP, 84, 3},
-       { DBGBUS_DSPP, 84, 4},
-       { DBGBUS_DSPP, 84, 5},
-       { DBGBUS_DSPP, 84, 6},
-       { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
-
-
-       { DBGBUS_DSPP, 85, 1},
-       { DBGBUS_DSPP, 85, 2},
-       { DBGBUS_DSPP, 85, 3},
-       { DBGBUS_DSPP, 85, 4},
-       { DBGBUS_DSPP, 85, 5},
-       { DBGBUS_DSPP, 85, 6},
-       { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
-
-
-       { DBGBUS_DSPP, 86, 1},
-       { DBGBUS_DSPP, 86, 2},
-       { DBGBUS_DSPP, 86, 3},
-       { DBGBUS_DSPP, 86, 4},
-       { DBGBUS_DSPP, 86, 5},
-       { DBGBUS_DSPP, 86, 6},
-       { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
-
-
-       { DBGBUS_DSPP, 87, 1},
-       { DBGBUS_DSPP, 87, 2},
-       { DBGBUS_DSPP, 87, 3},
-       { DBGBUS_DSPP, 87, 4},
-       { DBGBUS_DSPP, 87, 5},
-       { DBGBUS_DSPP, 87, 6},
-       { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
-
-       /* LM1 */
-       { DBGBUS_DSPP, 70, 1},
-       { DBGBUS_DSPP, 70, 2},
-       { DBGBUS_DSPP, 70, 3},
-       { DBGBUS_DSPP, 70, 4},
-       { DBGBUS_DSPP, 70, 5},
-       { DBGBUS_DSPP, 70, 6},
-       { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 71, 1},
-       { DBGBUS_DSPP, 71, 2},
-       { DBGBUS_DSPP, 71, 3},
-       { DBGBUS_DSPP, 71, 4},
-       { DBGBUS_DSPP, 71, 5},
-       { DBGBUS_DSPP, 71, 6},
-       { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 72, 1},
-       { DBGBUS_DSPP, 72, 2},
-       { DBGBUS_DSPP, 72, 3},
-       { DBGBUS_DSPP, 72, 4},
-       { DBGBUS_DSPP, 72, 5},
-       { DBGBUS_DSPP, 72, 6},
-       { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 73, 1},
-       { DBGBUS_DSPP, 73, 2},
-       { DBGBUS_DSPP, 73, 3},
-       { DBGBUS_DSPP, 73, 4},
-       { DBGBUS_DSPP, 73, 5},
-       { DBGBUS_DSPP, 73, 6},
-       { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 74, 1},
-       { DBGBUS_DSPP, 74, 2},
-       { DBGBUS_DSPP, 74, 3},
-       { DBGBUS_DSPP, 74, 4},
-       { DBGBUS_DSPP, 74, 5},
-       { DBGBUS_DSPP, 74, 6},
-       { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 75, 1},
-       { DBGBUS_DSPP, 75, 2},
-       { DBGBUS_DSPP, 75, 3},
-       { DBGBUS_DSPP, 75, 4},
-       { DBGBUS_DSPP, 75, 5},
-       { DBGBUS_DSPP, 75, 6},
-       { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 76, 1},
-       { DBGBUS_DSPP, 76, 2},
-       { DBGBUS_DSPP, 76, 3},
-       { DBGBUS_DSPP, 76, 4},
-       { DBGBUS_DSPP, 76, 5},
-       { DBGBUS_DSPP, 76, 6},
-       { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 88, 1},
-       { DBGBUS_DSPP, 88, 2},
-       { DBGBUS_DSPP, 88, 3},
-       { DBGBUS_DSPP, 88, 4},
-       { DBGBUS_DSPP, 88, 5},
-       { DBGBUS_DSPP, 88, 6},
-       { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 89, 1},
-       { DBGBUS_DSPP, 89, 2},
-       { DBGBUS_DSPP, 89, 3},
-       { DBGBUS_DSPP, 89, 4},
-       { DBGBUS_DSPP, 89, 5},
-       { DBGBUS_DSPP, 89, 6},
-       { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 90, 1},
-       { DBGBUS_DSPP, 90, 2},
-       { DBGBUS_DSPP, 90, 3},
-       { DBGBUS_DSPP, 90, 4},
-       { DBGBUS_DSPP, 90, 5},
-       { DBGBUS_DSPP, 90, 6},
-       { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 91, 1},
-       { DBGBUS_DSPP, 91, 2},
-       { DBGBUS_DSPP, 91, 3},
-       { DBGBUS_DSPP, 91, 4},
-       { DBGBUS_DSPP, 91, 5},
-       { DBGBUS_DSPP, 91, 6},
-       { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
-
-       /* LM2 */
-       { DBGBUS_DSPP, 77, 0},
-       { DBGBUS_DSPP, 77, 1},
-       { DBGBUS_DSPP, 77, 2},
-       { DBGBUS_DSPP, 77, 3},
-       { DBGBUS_DSPP, 77, 4},
-       { DBGBUS_DSPP, 77, 5},
-       { DBGBUS_DSPP, 77, 6},
-       { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 78, 0},
-       { DBGBUS_DSPP, 78, 1},
-       { DBGBUS_DSPP, 78, 2},
-       { DBGBUS_DSPP, 78, 3},
-       { DBGBUS_DSPP, 78, 4},
-       { DBGBUS_DSPP, 78, 5},
-       { DBGBUS_DSPP, 78, 6},
-       { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 79, 0},
-       { DBGBUS_DSPP, 79, 1},
-       { DBGBUS_DSPP, 79, 2},
-       { DBGBUS_DSPP, 79, 3},
-       { DBGBUS_DSPP, 79, 4},
-       { DBGBUS_DSPP, 79, 5},
-       { DBGBUS_DSPP, 79, 6},
-       { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 80, 0},
-       { DBGBUS_DSPP, 80, 1},
-       { DBGBUS_DSPP, 80, 2},
-       { DBGBUS_DSPP, 80, 3},
-       { DBGBUS_DSPP, 80, 4},
-       { DBGBUS_DSPP, 80, 5},
-       { DBGBUS_DSPP, 80, 6},
-       { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 81, 0},
-       { DBGBUS_DSPP, 81, 1},
-       { DBGBUS_DSPP, 81, 2},
-       { DBGBUS_DSPP, 81, 3},
-       { DBGBUS_DSPP, 81, 4},
-       { DBGBUS_DSPP, 81, 5},
-       { DBGBUS_DSPP, 81, 6},
-       { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 82, 0},
-       { DBGBUS_DSPP, 82, 1},
-       { DBGBUS_DSPP, 82, 2},
-       { DBGBUS_DSPP, 82, 3},
-       { DBGBUS_DSPP, 82, 4},
-       { DBGBUS_DSPP, 82, 5},
-       { DBGBUS_DSPP, 82, 6},
-       { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 83, 0},
-       { DBGBUS_DSPP, 83, 1},
-       { DBGBUS_DSPP, 83, 2},
-       { DBGBUS_DSPP, 83, 3},
-       { DBGBUS_DSPP, 83, 4},
-       { DBGBUS_DSPP, 83, 5},
-       { DBGBUS_DSPP, 83, 6},
-       { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 92, 1},
-       { DBGBUS_DSPP, 92, 2},
-       { DBGBUS_DSPP, 92, 3},
-       { DBGBUS_DSPP, 92, 4},
-       { DBGBUS_DSPP, 92, 5},
-       { DBGBUS_DSPP, 92, 6},
-       { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 93, 1},
-       { DBGBUS_DSPP, 93, 2},
-       { DBGBUS_DSPP, 93, 3},
-       { DBGBUS_DSPP, 93, 4},
-       { DBGBUS_DSPP, 93, 5},
-       { DBGBUS_DSPP, 93, 6},
-       { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 94, 1},
-       { DBGBUS_DSPP, 94, 2},
-       { DBGBUS_DSPP, 94, 3},
-       { DBGBUS_DSPP, 94, 4},
-       { DBGBUS_DSPP, 94, 5},
-       { DBGBUS_DSPP, 94, 6},
-       { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 95, 1},
-       { DBGBUS_DSPP, 95, 2},
-       { DBGBUS_DSPP, 95, 3},
-       { DBGBUS_DSPP, 95, 4},
-       { DBGBUS_DSPP, 95, 5},
-       { DBGBUS_DSPP, 95, 6},
-       { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
-
-       /* LM5 */
-       { DBGBUS_DSPP, 110, 1},
-       { DBGBUS_DSPP, 110, 2},
-       { DBGBUS_DSPP, 110, 3},
-       { DBGBUS_DSPP, 110, 4},
-       { DBGBUS_DSPP, 110, 5},
-       { DBGBUS_DSPP, 110, 6},
-       { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 111, 1},
-       { DBGBUS_DSPP, 111, 2},
-       { DBGBUS_DSPP, 111, 3},
-       { DBGBUS_DSPP, 111, 4},
-       { DBGBUS_DSPP, 111, 5},
-       { DBGBUS_DSPP, 111, 6},
-       { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 112, 1},
-       { DBGBUS_DSPP, 112, 2},
-       { DBGBUS_DSPP, 112, 3},
-       { DBGBUS_DSPP, 112, 4},
-       { DBGBUS_DSPP, 112, 5},
-       { DBGBUS_DSPP, 112, 6},
-       { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 113, 1},
-       { DBGBUS_DSPP, 113, 2},
-       { DBGBUS_DSPP, 113, 3},
-       { DBGBUS_DSPP, 113, 4},
-       { DBGBUS_DSPP, 113, 5},
-       { DBGBUS_DSPP, 113, 6},
-       { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 114, 1},
-       { DBGBUS_DSPP, 114, 2},
-       { DBGBUS_DSPP, 114, 3},
-       { DBGBUS_DSPP, 114, 4},
-       { DBGBUS_DSPP, 114, 5},
-       { DBGBUS_DSPP, 114, 6},
-       { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 115, 1},
-       { DBGBUS_DSPP, 115, 2},
-       { DBGBUS_DSPP, 115, 3},
-       { DBGBUS_DSPP, 115, 4},
-       { DBGBUS_DSPP, 115, 5},
-       { DBGBUS_DSPP, 115, 6},
-       { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 116, 1},
-       { DBGBUS_DSPP, 116, 2},
-       { DBGBUS_DSPP, 116, 3},
-       { DBGBUS_DSPP, 116, 4},
-       { DBGBUS_DSPP, 116, 5},
-       { DBGBUS_DSPP, 116, 6},
-       { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 117, 1},
-       { DBGBUS_DSPP, 117, 2},
-       { DBGBUS_DSPP, 117, 3},
-       { DBGBUS_DSPP, 117, 4},
-       { DBGBUS_DSPP, 117, 5},
-       { DBGBUS_DSPP, 117, 6},
-       { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 118, 1},
-       { DBGBUS_DSPP, 118, 2},
-       { DBGBUS_DSPP, 118, 3},
-       { DBGBUS_DSPP, 118, 4},
-       { DBGBUS_DSPP, 118, 5},
-       { DBGBUS_DSPP, 118, 6},
-       { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 119, 1},
-       { DBGBUS_DSPP, 119, 2},
-       { DBGBUS_DSPP, 119, 3},
-       { DBGBUS_DSPP, 119, 4},
-       { DBGBUS_DSPP, 119, 5},
-       { DBGBUS_DSPP, 119, 6},
-       { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
-
-       { DBGBUS_DSPP, 120, 1},
-       { DBGBUS_DSPP, 120, 2},
-       { DBGBUS_DSPP, 120, 3},
-       { DBGBUS_DSPP, 120, 4},
-       { DBGBUS_DSPP, 120, 5},
-       { DBGBUS_DSPP, 120, 6},
-       { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
-
-       /* csc */
-       { DBGBUS_SSPP0, 7, 0},
-       { DBGBUS_SSPP0, 7, 1},
-       { DBGBUS_SSPP0, 27, 0},
-       { DBGBUS_SSPP0, 27, 1},
-       { DBGBUS_SSPP1, 7, 0},
-       { DBGBUS_SSPP1, 7, 1},
-       { DBGBUS_SSPP1, 27, 0},
-       { DBGBUS_SSPP1, 27, 1},
-
-       /* pcc */
-       { DBGBUS_SSPP0, 3,  3},
-       { DBGBUS_SSPP0, 23, 3},
-       { DBGBUS_SSPP0, 33, 3},
-       { DBGBUS_SSPP0, 43, 3},
-       { DBGBUS_SSPP1, 3,  3},
-       { DBGBUS_SSPP1, 23, 3},
-       { DBGBUS_SSPP1, 33, 3},
-       { DBGBUS_SSPP1, 43, 3},
-
-       /* spa */
-       { DBGBUS_SSPP0, 8,  0},
-       { DBGBUS_SSPP0, 28, 0},
-       { DBGBUS_SSPP1, 8,  0},
-       { DBGBUS_SSPP1, 28, 0},
-       { DBGBUS_DSPP, 13, 0},
-       { DBGBUS_DSPP, 19, 0},
-
-       /* igc */
-       { DBGBUS_SSPP0, 17, 0},
-       { DBGBUS_SSPP0, 17, 1},
-       { DBGBUS_SSPP0, 17, 3},
-       { DBGBUS_SSPP0, 37, 0},
-       { DBGBUS_SSPP0, 37, 1},
-       { DBGBUS_SSPP0, 37, 3},
-       { DBGBUS_SSPP0, 46, 0},
-       { DBGBUS_SSPP0, 46, 1},
-       { DBGBUS_SSPP0, 46, 3},
-
-       { DBGBUS_SSPP1, 17, 0},
-       { DBGBUS_SSPP1, 17, 1},
-       { DBGBUS_SSPP1, 17, 3},
-       { DBGBUS_SSPP1, 37, 0},
-       { DBGBUS_SSPP1, 37, 1},
-       { DBGBUS_SSPP1, 37, 3},
-       { DBGBUS_SSPP1, 46, 0},
-       { DBGBUS_SSPP1, 46, 1},
-       { DBGBUS_SSPP1, 46, 3},
-
-       { DBGBUS_DSPP, 14, 0},
-       { DBGBUS_DSPP, 14, 1},
-       { DBGBUS_DSPP, 14, 3},
-       { DBGBUS_DSPP, 20, 0},
-       { DBGBUS_DSPP, 20, 1},
-       { DBGBUS_DSPP, 20, 3},
-
-       /* intf0-3 */
-       { DBGBUS_PERIPH, 0, 0},
-       { DBGBUS_PERIPH, 1, 0},
-       { DBGBUS_PERIPH, 2, 0},
-       { DBGBUS_PERIPH, 3, 0},
-
-       /* te counter wrapper */
-       { DBGBUS_PERIPH, 60, 0},
-
-       /* dsc0 */
-       { DBGBUS_PERIPH, 47, 0},
-       { DBGBUS_PERIPH, 47, 1},
-       { DBGBUS_PERIPH, 47, 2},
-       { DBGBUS_PERIPH, 47, 3},
-       { DBGBUS_PERIPH, 47, 4},
-       { DBGBUS_PERIPH, 47, 5},
-       { DBGBUS_PERIPH, 47, 6},
-       { DBGBUS_PERIPH, 47, 7},
-
-       /* dsc1 */
-       { DBGBUS_PERIPH, 48, 0},
-       { DBGBUS_PERIPH, 48, 1},
-       { DBGBUS_PERIPH, 48, 2},
-       { DBGBUS_PERIPH, 48, 3},
-       { DBGBUS_PERIPH, 48, 4},
-       { DBGBUS_PERIPH, 48, 5},
-       { DBGBUS_PERIPH, 48, 6},
-       { DBGBUS_PERIPH, 48, 7},
-
-       /* dsc2 */
-       { DBGBUS_PERIPH, 51, 0},
-       { DBGBUS_PERIPH, 51, 1},
-       { DBGBUS_PERIPH, 51, 2},
-       { DBGBUS_PERIPH, 51, 3},
-       { DBGBUS_PERIPH, 51, 4},
-       { DBGBUS_PERIPH, 51, 5},
-       { DBGBUS_PERIPH, 51, 6},
-       { DBGBUS_PERIPH, 51, 7},
-
-       /* dsc3 */
-       { DBGBUS_PERIPH, 52, 0},
-       { DBGBUS_PERIPH, 52, 1},
-       { DBGBUS_PERIPH, 52, 2},
-       { DBGBUS_PERIPH, 52, 3},
-       { DBGBUS_PERIPH, 52, 4},
-       { DBGBUS_PERIPH, 52, 5},
-       { DBGBUS_PERIPH, 52, 6},
-       { DBGBUS_PERIPH, 52, 7},
-
-       /* tear-check */
-       { DBGBUS_PERIPH, 63, 0 },
-       { DBGBUS_PERIPH, 64, 0 },
-       { DBGBUS_PERIPH, 65, 0 },
-       { DBGBUS_PERIPH, 73, 0 },
-       { DBGBUS_PERIPH, 74, 0 },
-
-       /* cdwn */
-       { DBGBUS_PERIPH, 80, 0},
-       { DBGBUS_PERIPH, 80, 1},
-       { DBGBUS_PERIPH, 80, 2},
-
-       { DBGBUS_PERIPH, 81, 0},
-       { DBGBUS_PERIPH, 81, 1},
-       { DBGBUS_PERIPH, 81, 2},
-
-       { DBGBUS_PERIPH, 82, 0},
-       { DBGBUS_PERIPH, 82, 1},
-       { DBGBUS_PERIPH, 82, 2},
-       { DBGBUS_PERIPH, 82, 3},
-       { DBGBUS_PERIPH, 82, 4},
-       { DBGBUS_PERIPH, 82, 5},
-       { DBGBUS_PERIPH, 82, 6},
-       { DBGBUS_PERIPH, 82, 7},
-
-       /* hdmi */
-       { DBGBUS_PERIPH, 68, 0},
-       { DBGBUS_PERIPH, 68, 1},
-       { DBGBUS_PERIPH, 68, 2},
-       { DBGBUS_PERIPH, 68, 3},
-       { DBGBUS_PERIPH, 68, 4},
-       { DBGBUS_PERIPH, 68, 5},
-
-       /* edp */
-       { DBGBUS_PERIPH, 69, 0},
-       { DBGBUS_PERIPH, 69, 1},
-       { DBGBUS_PERIPH, 69, 2},
-       { DBGBUS_PERIPH, 69, 3},
-       { DBGBUS_PERIPH, 69, 4},
-       { DBGBUS_PERIPH, 69, 5},
-
-       /* dsi0 */
-       { DBGBUS_PERIPH, 70, 0},
-       { DBGBUS_PERIPH, 70, 1},
-       { DBGBUS_PERIPH, 70, 2},
-       { DBGBUS_PERIPH, 70, 3},
-       { DBGBUS_PERIPH, 70, 4},
-       { DBGBUS_PERIPH, 70, 5},
-
-       /* dsi1 */
-       { DBGBUS_PERIPH, 71, 0},
-       { DBGBUS_PERIPH, 71, 1},
-       { DBGBUS_PERIPH, 71, 2},
-       { DBGBUS_PERIPH, 71, 3},
-       { DBGBUS_PERIPH, 71, 4},
-       { DBGBUS_PERIPH, 71, 5},
-};
-
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
-       {0x214, 0x21c, 16, 2, 0x0, 0xd},     /* arb clients */
-       {0x214, 0x21c, 16, 2, 0x80, 0xc0},   /* arb clients */
-       {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
-       {0x214, 0x21c, 0, 16, 0x0, 0xf},     /* xin blocks - axi side */
-       {0x214, 0x21c, 0, 16, 0x80, 0xa4},   /* xin blocks - axi side */
-       {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
-       {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _dpu_dbg_enable_power - use callback to turn power on for hw register access
- * @enable: whether to turn power on or off
- */
-static inline void _dpu_dbg_enable_power(int enable)
-{
-       if (enable)
-               pm_runtime_get_sync(dpu_dbg_base.dev);
-       else
-               pm_runtime_put_sync(dpu_dbg_base.dev);
-}
-
-static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
-{
-       bool in_log, in_mem;
-       u32 **dump_mem = NULL;
-       u32 *dump_addr = NULL;
-       u32 status = 0;
-       struct dpu_debug_bus_entry *head;
-       phys_addr_t phys = 0;
-       int list_size;
-       int i;
-       u32 offset;
-       void __iomem *mem_base = NULL;
-       struct dpu_dbg_reg_base *reg_base;
-
-       if (!bus || !bus->cmn.entries_size)
-               return;
-
-       list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
-                       reg_base_head)
-               if (strlen(reg_base->name) &&
-                       !strcmp(reg_base->name, bus->cmn.name))
-                       mem_base = reg_base->base + bus->top_blk_off;
-
-       if (!mem_base) {
-               pr_err("unable to find mem_base for %s\n", bus->cmn.name);
-               return;
-       }
-
-       dump_mem = &bus->cmn.dumped_content;
-
-       /* will keep in memory 4 entries of 4 bytes each */
-       list_size = (bus->cmn.entries_size * 4 * 4);
-
-       in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
-       in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
-       if (!in_log && !in_mem)
-               return;
-
-       dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
-                       bus->cmn.name);
-
-       if (in_mem) {
-               if (!(*dump_mem))
-                       *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
-                               list_size, &phys, GFP_KERNEL);
-
-               if (*dump_mem) {
-                       dump_addr = *dump_mem;
-                       dev_info(dpu_dbg_base.dev,
-                               "%s: start_addr:0x%pK len:0x%x\n",
-                               __func__, dump_addr, list_size);
-               } else {
-                       in_mem = false;
-                       pr_err("dump_mem: allocation fails\n");
-               }
-       }
-
-       _dpu_dbg_enable_power(true);
-       for (i = 0; i < bus->cmn.entries_size; i++) {
-               head = bus->entries + i;
-               writel_relaxed(TEST_MASK(head->block_id, head->test_id),
-                               mem_base + head->wr_addr);
-               wmb(); /* make sure test bits were written */
-
-               if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
-                       offset = DBGBUS_DSPP_STATUS;
-                       /* keep DSPP test point enabled */
-                       if (head->wr_addr != DBGBUS_DSPP)
-                               writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
-               } else {
-                       offset = head->wr_addr + 0x4;
-               }
-
-               status = readl_relaxed(mem_base + offset);
-
-               if (in_log)
-                       dev_info(dpu_dbg_base.dev,
-                                       "waddr=0x%x blk=%d tst=%d val=0x%x\n",
-                                       head->wr_addr, head->block_id,
-                                       head->test_id, status);
-
-               if (dump_addr && in_mem) {
-                       dump_addr[i*4]     = head->wr_addr;
-                       dump_addr[i*4 + 1] = head->block_id;
-                       dump_addr[i*4 + 2] = head->test_id;
-                       dump_addr[i*4 + 3] = status;
-               }
-
-               if (head->analyzer)
-                       head->analyzer(mem_base, head, status);
-
-               /* Disable debug bus once we are done */
-               writel_relaxed(0, mem_base + head->wr_addr);
-               if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
-                                               head->wr_addr != DBGBUS_DSPP)
-                       writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
-       }
-       _dpu_dbg_enable_power(false);
-
-       dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
-                       bus->cmn.name);
-}
-
-static void _dpu_dbg_dump_vbif_debug_bus_entry(
-               struct vbif_debug_bus_entry *head, void __iomem *mem_base,
-               u32 *dump_addr, bool in_log)
-{
-       int i, j;
-       u32 val;
-
-       if (!dump_addr && !in_log)
-               return;
-
-       for (i = 0; i < head->block_cnt; i++) {
-               writel_relaxed(1 << (i + head->bit_offset),
-                               mem_base + head->block_bus_addr);
-               /* make sure that current bus blcok enable */
-               wmb();
-               for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
-                       writel_relaxed(j, mem_base + head->block_bus_addr + 4);
-                       /* make sure that test point is enabled */
-                       wmb();
-                       val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
-                       if (dump_addr) {
-                               *dump_addr++ = head->block_bus_addr;
-                               *dump_addr++ = i;
-                               *dump_addr++ = j;
-                               *dump_addr++ = val;
-                       }
-                       if (in_log)
-                               dev_info(dpu_dbg_base.dev,
-                                       "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
-                                       head->block_bus_addr, i, j, val);
-               }
-       }
-}
-
-static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
-{
-       bool in_log, in_mem;
-       u32 **dump_mem = NULL;
-       u32 *dump_addr = NULL;
-       u32 value, d0, d1;
-       unsigned long reg, reg1, reg2;
-       struct vbif_debug_bus_entry *head;
-       phys_addr_t phys = 0;
-       int i, list_size = 0;
-       void __iomem *mem_base = NULL;
-       struct vbif_debug_bus_entry *dbg_bus;
-       u32 bus_size;
-       struct dpu_dbg_reg_base *reg_base;
-
-       if (!bus || !bus->cmn.entries_size)
-               return;
-
-       list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
-                       reg_base_head)
-               if (strlen(reg_base->name) &&
-                       !strcmp(reg_base->name, bus->cmn.name))
-                       mem_base = reg_base->base;
-
-       if (!mem_base) {
-               pr_err("unable to find mem_base for %s\n", bus->cmn.name);
-               return;
-       }
-
-       dbg_bus = bus->entries;
-       bus_size = bus->cmn.entries_size;
-       list_size = bus->cmn.entries_size;
-       dump_mem = &bus->cmn.dumped_content;
-
-       dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
-                       bus->cmn.name);
-
-       if (!dump_mem || !dbg_bus || !bus_size || !list_size)
-               return;
-
-       /* allocate memory for each test point */
-       for (i = 0; i < bus_size; i++) {
-               head = dbg_bus + i;
-               list_size += (head->block_cnt * head->test_pnt_cnt);
-       }
-
-       /* 4 bytes * 4 entries for each test point*/
-       list_size *= 16;
-
-       in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
-       in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
-       if (!in_log && !in_mem)
-               return;
-
-       if (in_mem) {
-               if (!(*dump_mem))
-                       *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
-                               list_size, &phys, GFP_KERNEL);
-
-               if (*dump_mem) {
-                       dump_addr = *dump_mem;
-                       dev_info(dpu_dbg_base.dev,
-                               "%s: start_addr:0x%pK len:0x%x\n",
-                               __func__, dump_addr, list_size);
-               } else {
-                       in_mem = false;
-                       pr_err("dump_mem: allocation fails\n");
-               }
-       }
-
-       _dpu_dbg_enable_power(true);
-
-       value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
-       writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
-       /* make sure that vbif core is on */
-       wmb();
-
-       /**
-        * Extract VBIF error info based on XIN halt and error status.
-        * If the XIN client is not in HALT state, or an error is detected,
-        * then retrieve the VBIF error info for it.
-        */
-       reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
-       reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
-       reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
-       dev_err(dpu_dbg_base.dev,
-                       "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
-                       reg, reg1, reg2);
-       reg >>= 16;
-       reg &= ~(reg1 | reg2);
-       for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
-               if (!test_bit(0, &reg)) {
-                       writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
-                       /* make sure reg write goes through */
-                       wmb();
-
-                       d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
-                       d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
-                       dev_err(dpu_dbg_base.dev,
-                                       "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
-                                       i, d0, d1);
-               }
-               reg >>= 1;
-       }
-
-       for (i = 0; i < bus_size; i++) {
-               head = dbg_bus + i;
-
-               writel_relaxed(0, mem_base + head->disable_bus_addr);
-               writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
-               /* make sure that other bus is off */
-               wmb();
-
-               _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
-                               in_log);
-               if (dump_addr)
-                       dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
-       }
-
-       _dpu_dbg_enable_power(false);
-
-       dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
-                       bus->cmn.name);
-}
-
-/**
- * _dpu_dump_array - dump array of register bases
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
-                           bool dump_dbgbus_vbif_rt)
-{
-       if (dump_dbgbus_dpu)
-               _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
-
-       if (dump_dbgbus_vbif_rt)
-               _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
-}
-
-/**
- * _dpu_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _dpu_dump_work(struct work_struct *work)
-{
-       _dpu_dump_array("dpudump_workitem",
-               dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
-               dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
-}
-
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
-                 bool dump_dbgbus_vbif_rt)
-{
-       if (queue_work && work_pending(&dpu_dbg_base.dump_work))
-               return;
-
-       if (!queue_work) {
-               _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
-               return;
-       }
-
-       /* schedule work to dump later */
-       dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
-       dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
-                       dump_dbgbus_vbif_rt;
-       schedule_work(&dpu_dbg_base.dump_work);
-}
-
-/*
- * dpu_dbg_debugfs_open - debugfs open handler for debug dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
-       /* non-seekable */
-       file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-       file->private_data = inode->i_private;
-       return 0;
-}
-
-/**
- * dpu_dbg_dump_write - debugfs write handler for debug dump
- * @file: file handler
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t dpu_dbg_dump_write(struct file *file,
-       const char __user *user_buf, size_t count, loff_t *ppos)
-{
-       _dpu_dump_array("dump_debugfs", true, true);
-       return count;
-}
-
-static const struct file_operations dpu_dbg_dump_fops = {
-       .open = dpu_dbg_debugfs_open,
-       .write = dpu_dbg_dump_write,
-};
-
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
-       static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-       char debug_name[80] = "";
-
-       if (!debugfs_root)
-               return -EINVAL;
-
-       debugfs_create_file("dump", 0600, debugfs_root, NULL,
-                       &dpu_dbg_dump_fops);
-
-       if (dbg->dbgbus_dpu.entries) {
-               dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
-               snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
-                               dbg->dbgbus_dpu.cmn.name);
-               dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
-               debugfs_create_u32(debug_name, 0600, debugfs_root,
-                               &dbg->dbgbus_dpu.cmn.enable_mask);
-       }
-
-       if (dbg->dbgbus_vbif_rt.entries) {
-               dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
-               snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
-                               dbg->dbgbus_vbif_rt.cmn.name);
-               dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
-               debugfs_create_u32(debug_name, 0600, debugfs_root,
-                               &dbg->dbgbus_vbif_rt.cmn.enable_mask);
-       }
-
-       return 0;
-}
-
-static void _dpu_dbg_debugfs_destroy(void)
-{
-}
-
-void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-       static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-
-       memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
-       memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
-       if (IS_MSM8998_TARGET(hwversion)) {
-               dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
-               dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
-               dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
-               dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
-               dbg->dbgbus_vbif_rt.cmn.entries_size =
-                               ARRAY_SIZE(vbif_dbg_bus_msm8998);
-       } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
-               dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
-               dbg->dbgbus_dpu.cmn.entries_size =
-                               ARRAY_SIZE(dbg_bus_dpu_sdm845);
-               dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
-               /* vbif is unchanged vs 8998 */
-               dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
-               dbg->dbgbus_vbif_rt.cmn.entries_size =
-                               ARRAY_SIZE(vbif_dbg_bus_msm8998);
-       } else {
-               pr_err("unsupported chipset id %X\n", hwversion);
-       }
-}
-
-int dpu_dbg_init(struct device *dev)
-{
-       if (!dev) {
-               pr_err("invalid params\n");
-               return -EINVAL;
-       }
-
-       INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
-       dpu_dbg_base.dev = dev;
-
-       INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
-
-       return 0;
-}
-
-/**
- * dpu_dbg_destroy - destroy dpu debug facilities
- */
-void dpu_dbg_destroy(void)
-{
-       _dpu_dbg_debugfs_destroy();
-}
-
-void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-       dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
deleted file mode 100644 (file)
index 1e6fa94..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef DPU_DBG_H_
-#define DPU_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-enum dpu_dbg_dump_flag {
-       DPU_DBG_DUMP_IN_LOG = BIT(0),
-       DPU_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion:         Chipset revision
- */
-void dpu_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * dpu_dbg_init - initialize global dpu debug facilities: regdump
- * @dev:               device handle
- * Returns:            0 or -ERROR
- */
-int dpu_dbg_init(struct device *dev);
-
-/**
- * dpu_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root:      debugfs root in which to create dpu debug entries
- * Returns:    0 or -ERROR
- */
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * dpu_dbg_destroy - destroy the global dpu debug facilities
- * Returns:    none
- */
-void dpu_dbg_destroy(void);
-
-/**
- * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
- * @queue_work:          whether to queue the dumping work to the work_struct
- * @name:        string indicating origin of dump
- * @dump_dbgbus:  dump the dpu debug bus
- * @dump_vbif_rt: dump the vbif rt bus
- * Returns:    none
- */
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
-                 bool dump_dbgbus_vbif_rt);
-
-/**
- * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
- *     address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void dpu_dbg_set_dpu_top_offset(u32 blk_off);
-
-#else
-
-static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int dpu_dbg_init(struct device *dev)
-{
-       return 0;
-}
-
-static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
-       return 0;
-}
-
-static inline void dpu_dbg_destroy(void)
-{
-}
-
-static inline void dpu_dbg_dump(bool queue_work, const char *name,
-                               bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
-{
-}
-
-static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-
-#endif /* DPU_DBG_H_ */
index 96cdf06e7da21d8f5bf576014f149f2efbbc7298..0dda4a603685da03964d3794ed0555299c9b03fe 100644 (file)
@@ -130,8 +130,9 @@ enum dpu_enc_rc_states {
  *     Virtual encoder defers as much as possible to the physical encoders.
  *     Virtual encoder registers itself with the DRM Framework as the encoder.
  * @base:              drm_encoder base class for registration with DRM
- * @enc_spin_lock:     Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enc_spinlock:      Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @bus_scaling_client:        Client handle to the bus scaling interface
+ * @enabled:           True if the encoder is active, protected by enc_lock
  * @num_phys_encs:     Actual number of physical encoders contained.
  * @phys_encs:         Container of physical encoders managed.
  * @cur_master:                Pointer to the current master in this mode. Optimization
@@ -141,15 +142,17 @@ enum dpu_enc_rc_states {
  * @intfs_swapped      Whether or not the phys_enc interfaces have been swapped
  *                     for partial update right-only cases, such as pingpong
  *                     split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb:    Callback into the upper layer / CRTC for
- *                     notification of the VBLANK
- * @crtc_vblank_cb_data:       Data from upper layer for VBLANK notification
+ * @crtc:              Pointer to the currently assigned crtc. Normally you
+ *                     would use crtc->state->encoder_mask to determine the
+ *                     link between encoder/crtc. However in this case we need
+ *                     to track crtc in the disable() hook which is called
+ *                     _after_ encoder_mask is cleared.
  * @crtc_kickoff_cb:           Callback into CRTC that will flush & start
  *                             all CTL paths
  * @crtc_kickoff_cb_data:      Opaque user data given to crtc_kickoff_cb
  * @debugfs_root:              Debug file system root file node
- * @enc_lock:                  Lock around physical encoder create/destroy and
-                               access.
+ * @enc_lock:                  Lock around physical encoder
+ *                             create/destroy/enable/disable
  * @frame_busy_mask:           Bitmask tracking which phys_enc we are still
  *                             busy processing current command.
  *                             Bit0 = phys_encs[0] etc.
@@ -175,6 +178,8 @@ struct dpu_encoder_virt {
        spinlock_t enc_spinlock;
        uint32_t bus_scaling_client;
 
+       bool enabled;
+
        unsigned int num_phys_encs;
        struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
        struct dpu_encoder_phys *cur_master;
@@ -183,8 +188,7 @@ struct dpu_encoder_virt {
 
        bool intfs_swapped;
 
-       void (*crtc_vblank_cb)(void *);
-       void *crtc_vblank_cb_data;
+       struct drm_crtc *crtc;
 
        struct dentry *debugfs_root;
        struct mutex enc_lock;
@@ -210,39 +214,6 @@ struct dpu_encoder_virt {
 };
 
 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
-static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
-                                                               bool enable)
-{
-       struct drm_encoder *drm_enc;
-       struct msm_drm_private *priv;
-       struct dpu_kms *dpu_kms;
-
-       if (!dpu_enc) {
-               DPU_ERROR("invalid dpu enc\n");
-               return -EINVAL;
-       }
-
-       drm_enc = &dpu_enc->base;
-       if (!drm_enc->dev || !drm_enc->dev->dev_private) {
-               DPU_ERROR("drm device invalid\n");
-               return -EINVAL;
-       }
-
-       priv = drm_enc->dev->dev_private;
-       if (!priv->kms) {
-               DPU_ERROR("invalid kms\n");
-               return -EINVAL;
-       }
-
-       dpu_kms = to_dpu_kms(priv->kms);
-
-       if (enable)
-               pm_runtime_get_sync(&dpu_kms->pdev->dev);
-       else
-               pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
-       return 0;
-}
 
 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
                enum dpu_intr_idx intr_idx)
@@ -1119,28 +1090,24 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
        _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
 }
 
-void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
 {
-       struct dpu_encoder_virt *dpu_enc = NULL;
-       int i;
-
-       if (!drm_enc) {
-               DPU_ERROR("invalid encoder\n");
-               return;
-       }
-       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 
-       for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-               struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+       mutex_lock(&dpu_enc->enc_lock);
 
-               if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
-                       phys->ops.restore(phys);
-       }
+       if (!dpu_enc->enabled)
+               goto out;
 
+       if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+               dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
        if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
                dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
 
        _dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+       mutex_unlock(&dpu_enc->enc_lock);
 }
 
 static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
@@ -1154,6 +1121,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
                return;
        }
        dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+       mutex_lock(&dpu_enc->enc_lock);
        cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
 
        trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@@ -1170,10 +1139,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
        if (ret) {
                DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
                                ret);
-               return;
+               goto out;
        }
 
        _dpu_encoder_virt_enable_helper(drm_enc);
+
+       dpu_enc->enabled = true;
+
+out:
+       mutex_unlock(&dpu_enc->enc_lock);
 }
 
 static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1195,11 +1169,14 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
                return;
        }
 
-       mode = &drm_enc->crtc->state->adjusted_mode;
-
        dpu_enc = to_dpu_encoder_virt(drm_enc);
        DPU_DEBUG_ENC(dpu_enc, "\n");
 
+       mutex_lock(&dpu_enc->enc_lock);
+       dpu_enc->enabled = false;
+
+       mode = &drm_enc->crtc->state->adjusted_mode;
+
        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);
 
@@ -1233,6 +1210,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
        DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
 
        dpu_rm_release(&dpu_kms->rm, drm_enc);
+
+       mutex_unlock(&dpu_enc->enc_lock);
 }
 
 static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@@ -1263,8 +1242,8 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
        dpu_enc = to_dpu_encoder_virt(drm_enc);
 
        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
-       if (dpu_enc->crtc_vblank_cb)
-               dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+       if (dpu_enc->crtc)
+               dpu_crtc_vblank_callback(dpu_enc->crtc);
        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 
        atomic_inc(&phy_enc->vsync_cnt);
@@ -1284,25 +1263,32 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
        DPU_ATRACE_END("encoder_underrun_callback");
 }
 
-void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
-               void (*vbl_cb)(void *), void *vbl_data)
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
 {
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        unsigned long lock_flags;
-       bool enable;
-       int i;
 
-       enable = vbl_cb ? true : false;
+       spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+       /* crtc should always be cleared before re-assigning */
+       WARN_ON(crtc && dpu_enc->crtc);
+       dpu_enc->crtc = crtc;
+       spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+                                       struct drm_crtc *crtc, bool enable)
+{
+       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+       unsigned long lock_flags;
+       int i;
 
-       if (!drm_enc) {
-               DPU_ERROR("invalid encoder\n");
-               return;
-       }
        trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
 
        spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
-       dpu_enc->crtc_vblank_cb = vbl_cb;
-       dpu_enc->crtc_vblank_cb_data = vbl_data;
+       if (dpu_enc->crtc != crtc) {
+               spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+               return;
+       }
        spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
 
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1407,8 +1393,9 @@ static void dpu_encoder_off_work(struct kthread_work *work)
  * phys: Pointer to physical encoder structure
  * extra_flush_bits: Additional bit mask to include in flush trigger
  */
-static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
-               struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+               struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
+               bool async)
 {
        struct dpu_hw_ctl *ctl;
        int pending_kickoff_cnt;
@@ -1431,7 +1418,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
                return;
        }
 
-       pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+       if (!async)
+               pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+       else
+               pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
 
        if (extra_flush_bits && ctl->ops.update_pending_flush)
                ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1450,7 +1440,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
  * _dpu_encoder_trigger_start - trigger start for a physical encoder
  * phys: Pointer to physical encoder structure
  */
-static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
 {
        if (!phys) {
                DPU_ERROR("invalid argument(s)\n");
@@ -1507,7 +1497,7 @@ static int dpu_encoder_helper_wait_event_timeout(
        return rc;
 }
 
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
 {
        struct dpu_encoder_virt *dpu_enc;
        struct dpu_hw_ctl *ctl;
@@ -1527,10 +1517,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
                      ctl->idx);
 
        rc = ctl->ops.reset(ctl);
-       if (rc) {
+       if (rc)
                DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n",  ctl->idx);
-               dpu_dbg_dump(false, __func__, true, true);
-       }
 
        phys_enc->enable_state = DPU_ENC_ENABLED;
 }
@@ -1544,7 +1532,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
  *     a time.
  * dpu_enc: Pointer to virtual encoder structure
  */
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
+                                     bool async)
 {
        struct dpu_hw_ctl *ctl;
        uint32_t i, pending_flush;
@@ -1575,7 +1564,8 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
                        set_bit(i, dpu_enc->frame_busy_mask);
                if (!phys->ops.needs_single_flush ||
                                !phys->ops.needs_single_flush(phys))
-                       _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+                       _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
+                                                  async);
                else if (ctl->ops.get_pending_flush)
                        pending_flush |= ctl->ops.get_pending_flush(ctl);
        }
@@ -1585,7 +1575,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
                _dpu_encoder_trigger_flush(
                                &dpu_enc->base,
                                dpu_enc->cur_master,
-                               pending_flush);
+                               pending_flush, async);
        }
 
        _dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1769,7 +1759,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
 }
 
 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
-               struct dpu_encoder_kickoff_params *params)
+               struct dpu_encoder_kickoff_params *params, bool async)
 {
        struct dpu_encoder_virt *dpu_enc;
        struct dpu_encoder_phys *phys;
@@ -1803,14 +1793,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
        if (needs_hw_reset) {
                trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
                for (i = 0; i < dpu_enc->num_phys_encs; i++) {
-                       phys = dpu_enc->phys_encs[i];
-                       if (phys && phys->ops.hw_reset)
-                               phys->ops.hw_reset(phys);
+                       dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
                }
        }
 }
 
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
 {
        struct dpu_encoder_virt *dpu_enc;
        struct dpu_encoder_phys *phys;
@@ -1833,7 +1821,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
                ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
 
        /* All phys encs are ready to go, trigger the kickoff */
-       _dpu_encoder_kickoff_phys(dpu_enc);
+       _dpu_encoder_kickoff_phys(dpu_enc, async);
 
        /* allow phys encs to handle any post-kickoff business */
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1875,14 +1863,9 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
 #ifdef CONFIG_DEBUG_FS
 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
 {
-       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_virt *dpu_enc = s->private;
        int i;
 
-       if (!s || !s->private)
-               return -EINVAL;
-
-       dpu_enc = s->private;
-
        mutex_lock(&dpu_enc->enc_lock);
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
                struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1920,7 +1903,7 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
 
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
-       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        struct msm_drm_private *priv;
        struct dpu_kms *dpu_kms;
        int i;
@@ -1934,12 +1917,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
        char name[DPU_NAME_SIZE];
 
-       if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+       if (!drm_enc->dev || !drm_enc->dev->dev_private) {
                DPU_ERROR("invalid encoder or kms\n");
                return -EINVAL;
        }
 
-       dpu_enc = to_dpu_encoder_virt(drm_enc);
        priv = drm_enc->dev->dev_private;
        dpu_kms = to_dpu_kms(priv->kms);
 
@@ -1964,26 +1946,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
        return 0;
 }
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-       struct dpu_encoder_virt *dpu_enc;
-
-       if (!drm_enc)
-               return;
-
-       dpu_enc = to_dpu_encoder_virt(drm_enc);
-       debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
 #else
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
        return 0;
 }
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
 #endif
 
 static int dpu_encoder_late_register(struct drm_encoder *encoder)
@@ -1993,7 +1960,9 @@ static int dpu_encoder_late_register(struct drm_encoder *encoder)
 
 static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
 {
-       _dpu_encoder_destroy_debugfs(encoder);
+       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+       debugfs_remove_recursive(dpu_enc->debugfs_root);
 }
 
 static int dpu_encoder_virt_add_phys_encs(
@@ -2268,6 +2237,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
 
+       dpu_enc->enabled = false;
+
        return &dpu_enc->base;
 }
 
index 9dbf38f446d936ce39bdf01e8c66fd521443fbe4..3f5dafe00580e268d0bd79327d0c9d3f65c5956e 100644 (file)
@@ -55,14 +55,22 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
                                  struct dpu_encoder_hw_resources *hw_res);
 
 /**
- * dpu_encoder_register_vblank_callback - provide callback to encoder that
- *     will be called on the next vblank.
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
  * @encoder:   encoder pointer
- * @cb:                callback pointer, provide NULL to deregister and disable IRQs
- * @data:      user data provided to callback
+ * @crtc:      crtc pointer
+ */
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+                            struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ *     the encoder is assigned to the given crtc
+ * @encoder:   encoder pointer
+ * @crtc:      crtc pointer
+ * @enable:    true if vblank should be enabled
  */
-void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
-               void (*cb)(void *), void *data);
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+                                       struct drm_crtc *crtc, bool enable);
 
 /**
  * dpu_encoder_register_frame_event_callback - provide callback to encoder that
@@ -81,9 +89,10 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
  *     Delayed: Block until next trigger can be issued.
  * @encoder:   encoder pointer
  * @params:    kickoff time parameters
+ * @async:     true if this is an asynchronous commit
  */
 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
-               struct dpu_encoder_kickoff_params *params);
+               struct dpu_encoder_kickoff_params *params, bool async);
 
 /**
  * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -96,8 +105,9 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
  * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
  *     (i.e. ctl flush and start) immediately.
  * @encoder:   encoder pointer
+ * @async:     true if this is an asynchronous commit
  */
-void dpu_encoder_kickoff(struct drm_encoder *encoder);
+void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
 
 /**
  * dpu_encoder_wait_for_event - Waits for encoder events
@@ -126,10 +136,10 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
 
 /**
- * dpu_encoder_virt_restore - restore the encoder configs
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
  * @encoder:   encoder pointer
  */
-void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
 
 /**
  * dpu_encoder_init - initialize virtual encoder object
index 964efcc757a4dcb53b9cae05e688ae9745f9089d..44e6f8b68e70d9fa5ab71075ec7a803356cc44f7 100644 (file)
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
  * @handle_post_kickoff:       Do any work necessary post-kickoff work
  * @trigger_start:             Process start event on physical encoder
  * @needs_single_flush:                Whether encoder slaves need to be flushed
- * @hw_reset:                  Issue HW recovery such as CTL reset and clear
- *                             DPU_ENC_ERR_NEEDS_HW_RESET state
  * @irq_control:               Handler to enable/disable all the encoder IRQs
  * @prepare_idle_pc:           phys encoder can update the vsync_enable status
  *                              on idle power collapse prepare
@@ -151,7 +149,6 @@ struct dpu_encoder_phys_ops {
        void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
        void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
        bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
-       void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
        void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
        void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
        void (*restore)(struct dpu_encoder_phys *phys);
@@ -342,15 +339,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
  */
 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
 
-/**
- * dpu_encoder_helper_hw_reset - issue ctl hw reset
- *     This helper function may be optionally specified by physical
- *     encoders if they require ctl hw reset. If state is currently
- *     DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
-
 static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
                struct dpu_encoder_phys *phys_enc)
 {
@@ -362,7 +350,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
        dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
 
        if (phys_enc->split_role == ENC_ROLE_SOLO &&
-           dpu_crtc_state_is_stereo(dpu_cstate))
+           dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
                return BLEND_3D_H_ROW_INT;
 
        return BLEND_3D_NONE;
index b2d7f0ded24c051247869eeebb71f36615524083..99ab5ca9bed3b721f26e2471705a6fe1d616b3a5 100644 (file)
 
 #define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
 
-static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
-               struct dpu_encoder_phys_cmd *cmd_enc)
-{
-       return KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool dpu_encoder_phys_cmd_is_master(
-               struct dpu_encoder_phys *phys_enc)
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
 {
        return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
 }
@@ -243,7 +236,6 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
                          atomic_read(&phys_enc->pending_kickoff_cnt));
 
                dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
-               dpu_dbg_dump(false, __func__, true, true);
        }
 
        atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -496,14 +488,11 @@ static void dpu_encoder_phys_cmd_enable_helper(
        _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
 
        if (!dpu_encoder_phys_cmd_is_master(phys_enc))
-               goto skip_flush;
+               return;
 
        ctl = phys_enc->hw_ctl;
        ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
        ctl->ops.update_pending_flush(ctl, flush_mask);
-
-skip_flush:
-       return;
 }
 
 static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -727,7 +716,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
 
        wait_info.wq = &cmd_enc->pending_vblank_wq;
        wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
-       wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+       wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
 
        atomic_inc(&cmd_enc->pending_vblank_cnt);
 
@@ -776,7 +765,6 @@ static void dpu_encoder_phys_cmd_init_ops(
        ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
        ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
        ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
-       ops->hw_reset = dpu_encoder_helper_hw_reset;
        ops->irq_control = dpu_encoder_phys_cmd_irq_control;
        ops->restore = dpu_encoder_phys_cmd_enable_helper;
        ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@@ -798,7 +786,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
        if (!cmd_enc) {
                ret = -ENOMEM;
                DPU_ERROR("failed to allocate\n");
-               goto fail;
+               return ERR_PTR(ret);
        }
        phys_enc = &cmd_enc->base;
        phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -856,6 +844,5 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
 
        return phys_enc;
 
-fail:
        return ERR_PTR(ret);
 }
index 84de385a9f6223881433bec56a38674598fb9d42..acdab5b0db18b5c776ffb8ee2e52df524b0c55d9 100644 (file)
@@ -110,7 +110,7 @@ static void drm_mode_to_intf_timing_params(
         */
 }
 
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
 {
        u32 active = timing->xres;
        u32 inactive =
@@ -119,7 +119,7 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
        return active + inactive;
 }
 
-static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct intf_timing_params *timing)
 {
        u32 active = timing->yres;
        u32 inactive =
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
        if (hw_ctl && hw_ctl->ops.get_flush_register)
                flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
-       if (flush_register == 0)
+       if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
                new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
                                -1, 0);
        spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -613,7 +613,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
                DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
                                ctl->idx, rc);
                dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
-               dpu_dbg_dump(false, __func__, true, true);
        }
 }
 
@@ -766,7 +765,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
        ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
        ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
        ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
-       ops->hw_reset = dpu_encoder_helper_hw_reset;
        ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
 }
 
index bfcd165e96dfe98d8f6ad16cdadc19017c3e7042..0aa9b8e1ae70783890da02f276bc3702289807c8 100644 (file)
@@ -921,7 +921,7 @@ static int _dpu_format_populate_addrs_ubwc(
                        + layout->plane_size[2] + layout->plane_size[3];
 
                if (!meta)
-                       goto done;
+                       return 0;
 
                /* configure Y metadata plane */
                layout->plane_addr[2] = base_addr;
@@ -952,12 +952,11 @@ static int _dpu_format_populate_addrs_ubwc(
                layout->plane_addr[1] = 0;
 
                if (!meta)
-                       goto done;
+                       return 0;
 
                layout->plane_addr[2] = base_addr;
                layout->plane_addr[3] = 0;
        }
-done:
        return 0;
 }
 
index 58d29e43faefde4f8608b0464c43b5ce47ab1fa6..92f1c4241b9aace6ad51a3de18030c7b7e2684d9 100644 (file)
@@ -30,16 +30,10 @@ static LIST_HEAD(dpu_hw_blk_list);
  * @type: hw block type - enum dpu_hw_blk_type
  * @id: instance id of the hw block
  * @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
  */
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
                struct dpu_hw_blk_ops *ops)
 {
-       if (!hw_blk) {
-               pr_err("invalid parameters\n");
-               return -EINVAL;
-       }
-
        INIT_LIST_HEAD(&hw_blk->list);
        hw_blk->type = type;
        hw_blk->id = id;
@@ -51,8 +45,6 @@ int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
        mutex_lock(&dpu_hw_blk_lock);
        list_add(&hw_blk->list, &dpu_hw_blk_list);
        mutex_unlock(&dpu_hw_blk_lock);
-
-       return 0;
 }
 
 /**
index 0f4ca8af1ec5a5738f78651c6230a84ead11d725..1934c2f7e8fa684ca7ad0bf5740823c5b7e381c1 100644 (file)
@@ -44,7 +44,7 @@ struct dpu_hw_blk {
        struct dpu_hw_blk_ops ops;
 };
 
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
                struct dpu_hw_blk_ops *ops);
 void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
 
index dc060e7358e44df3e223631d44565c53d9355e9d..144358a3d0fb6a9f9a2242644dfccafd65067c70 100644 (file)
@@ -736,13 +736,4 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
  */
 void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
 
-/**
- * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg:          pointer to sspp cfg
- */
-static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
-{
-       return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
-                        test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
-}
 #endif /* _DPU_HW_CATALOG_H */
index eec1051f2afc4593d20ffecec59af8beb13bb020..1068b4b7940f02c59ea7884a604f6ebbde6ae9cb 100644 (file)
@@ -13,8 +13,8 @@
 #include <linux/delay.h>
 #include "dpu_hwio.h"
 #include "dpu_hw_ctl.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
+#include "dpu_trace.h"
 
 #define   CTL_LAYER(lm)                 \
        (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -72,24 +72,39 @@ static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
        return stages;
 }
 
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+       struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+       return DPU_REG_READ(c, CTL_FLUSH);
+}
+
 static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
 {
+       trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+                                      dpu_hw_ctl_get_flush_register(ctx));
        DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
 }
 
 static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
 {
+       trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+                                        dpu_hw_ctl_get_flush_register(ctx));
        DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
 }
 
 static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
 {
+       trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+                                    dpu_hw_ctl_get_flush_register(ctx));
        ctx->pending_flush_mask = 0x0;
 }
 
 static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
                u32 flushbits)
 {
+       trace_dpu_hw_ctl_update_pending_flush(flushbits,
+                                             ctx->pending_flush_mask);
        ctx->pending_flush_mask |= flushbits;
 }
 
@@ -103,18 +118,12 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
 
 static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
 {
-
+       trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+                                    dpu_hw_ctl_get_flush_register(ctx));
        DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
 }
 
-static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
-{
-       struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
-       return DPU_REG_READ(c, CTL_FLUSH);
-}
-
-static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
        enum dpu_sspp sspp)
 {
        uint32_t flushbits = 0;
@@ -169,7 +178,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
        return flushbits;
 }
 
-static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
        enum dpu_lm lm)
 {
        uint32_t flushbits = 0;
@@ -202,7 +211,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
        return flushbits;
 }
 
-static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
                u32 *flushbits, enum dpu_intf intf)
 {
        switch (intf) {
@@ -474,10 +483,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
        ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
 };
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-       .start = NULL,
-       .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
                void __iomem *addr,
@@ -485,7 +491,6 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
 {
        struct dpu_hw_ctl *c;
        struct dpu_ctl_cfg *cfg;
-       int rc;
 
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
@@ -504,18 +509,9 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
        c->mixer_count = m->mixer_count;
        c->mixer_hw_caps = m->mixer;
 
-       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
-       if (rc) {
-               DPU_ERROR("failed to init hw blk %d\n", rc);
-               goto blk_init_error;
-       }
+       dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
 
        return c;
-
-blk_init_error:
-       kzfree(c);
-
-       return ERR_PTR(rc);
 }
 
 void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
index 9c6bba0ac7c30f18557d67e479047f3a936b2bb6..f6a83daa385b2123df7c7f6acaf71c3d7430ade0 100644 (file)
@@ -13,7 +13,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_intf.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define INTF_TIMING_ENGINE_EN           0x000
@@ -265,10 +264,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
        ops->get_line_count = dpu_hw_intf_get_line_count;
 }
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-       .start = NULL,
-       .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
                void __iomem *addr,
@@ -276,7 +272,6 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
 {
        struct dpu_hw_intf *c;
        struct dpu_intf_cfg *cfg;
-       int rc;
 
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
@@ -297,18 +292,9 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
        c->mdss = m;
        _setup_intf_ops(&c->ops, c->cap->features);
 
-       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
-       if (rc) {
-               DPU_ERROR("failed to init hw blk %d\n", rc);
-               goto blk_init_error;
-       }
+       dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
 
        return c;
-
-blk_init_error:
-       kzfree(c);
-
-       return ERR_PTR(rc);
 }
 
 void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
index 3b77df460deaf2924279114ce4c2713187acf1b7..a2b0dbc23058b6092300fe2960cf552b2b40ed0f 100644 (file)
@@ -91,16 +91,6 @@ struct dpu_hw_intf {
        struct dpu_hw_intf_ops ops;
 };
 
-/**
- * to_dpu_hw_intf - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
-{
-       return container_of(hw, struct dpu_hw_intf, base);
-}
-
 /**
  * dpu_hw_intf_init(): Initializes the intf driver for the passed
  * interface idx.
index acb8dc8acaa59687d075f01e00480dae80992d9a..018df2c3b7ed61a016a7194883e306cff1f52ab4 100644 (file)
@@ -15,7 +15,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_mdss.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define LM_OP_MODE                        0x00
@@ -64,16 +63,10 @@ static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
 static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
 {
        const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
-       int rc;
+       if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+               return sblk->blendstage_base[stage - DPU_STAGE_0];
 
-       if (stage == DPU_STAGE_BASE)
-               rc = -EINVAL;
-       else if (stage <= sblk->maxblendstages)
-               rc = sblk->blendstage_base[stage - DPU_STAGE_0];
-       else
-               rc = -EINVAL;
-
-       return rc;
+       return -EINVAL;
 }
 
 static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@@ -163,11 +156,6 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
        DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
 }
 
-static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
-                       void *cfg)
-{
-}
-
 static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
                struct dpu_hw_lm_ops *ops,
                unsigned long features)
@@ -179,13 +167,9 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
                ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
        ops->setup_alpha_out = dpu_hw_lm_setup_color3;
        ops->setup_border_color = dpu_hw_lm_setup_border_color;
-       ops->setup_gc = dpu_hw_lm_gc;
 };
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-       .start = NULL,
-       .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
                void __iomem *addr,
@@ -193,7 +177,6 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
 {
        struct dpu_hw_mixer *c;
        struct dpu_lm_cfg *cfg;
-       int rc;
 
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
@@ -210,18 +193,9 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
        c->cap = cfg;
        _setup_mixer_ops(m, &c->ops, c->cap->features);
 
-       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
-       if (rc) {
-               DPU_ERROR("failed to init hw blk %d\n", rc);
-               goto blk_init_error;
-       }
+       dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
 
        return c;
-
-blk_init_error:
-       kzfree(c);
-
-       return ERR_PTR(rc);
 }
 
 void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
index 5b036aca83408f11ee112cf7a7d172b1b9a67725..6aee839a6a234636eff0eec0d5f0044ab1678ed6 100644 (file)
@@ -61,11 +61,6 @@ struct dpu_hw_lm_ops {
        void (*setup_border_color)(struct dpu_hw_mixer *ctx,
                struct dpu_mdss_color *color,
                u8 border_en);
-       /**
-        * setup_gc : enable/disable gamma correction feature
-        */
-       void (*setup_gc)(struct dpu_hw_mixer *mixer,
-                       void *cfg);
 };
 
 struct dpu_hw_mixer {
index cc3a623903f4f90de1fc8d69f9ec7704139010a2..3bdf47ed1845166811e9885eaaaa9a116456699b 100644 (file)
@@ -16,7 +16,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_pingpong.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 #include "dpu_trace.h"
 
@@ -177,7 +176,7 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
        height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
 
        if (height < init)
-               goto line_count_exit;
+               return line;
 
        line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
 
@@ -186,7 +185,6 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
        else
                line -= init;
 
-line_count_exit:
        return line;
 }
 
@@ -201,10 +199,7 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
        ops->get_line_count = dpu_hw_pp_get_line_count;
 };
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-       .start = NULL,
-       .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
                void __iomem *addr,
@@ -212,7 +207,6 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
 {
        struct dpu_hw_pingpong *c;
        struct dpu_pingpong_cfg *cfg;
-       int rc;
 
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
@@ -228,18 +222,9 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
        c->caps = cfg;
        _setup_pingpong_ops(&c->ops, c->caps);
 
-       rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
-       if (rc) {
-               DPU_ERROR("failed to init hw blk %d\n", rc);
-               goto blk_init_error;
-       }
+       dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
 
        return c;
-
-blk_init_error:
-       kzfree(c);
-
-       return ERR_PTR(rc);
 }
 
 void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
index 3caccd7d6a3e1fa36436ddd83c62cedb1894ae81..0e02e43cee148749135b71bf8458730ffdb91b0d 100644 (file)
@@ -104,16 +104,6 @@ struct dpu_hw_pingpong {
        struct dpu_hw_pingpong_ops ops;
 };
 
-/**
- * dpu_hw_pingpong - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
-{
-       return container_of(hw, struct dpu_hw_pingpong, base);
-}
-
 /**
  * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
  *     pingpong idx.
index c25b52a6b21989828cd8e76428342fd0734e1469..e9132bf5166be8ae8b044d3a514f056564547eef 100644 (file)
@@ -14,7 +14,6 @@
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_sspp.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define DPU_FETCH_CONFIG_RESET_VALUE   0x00000087
 /* traffic shaper clock in Hz */
 #define TS_CLK                 19200000
 
-static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
                int s_id,
                u32 *idx)
 {
@@ -662,7 +661,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
                test_bit(DPU_SSPP_CSC_10BIT, &features))
                c->ops.setup_csc = dpu_hw_sspp_setup_csc;
 
-       if (dpu_hw_sspp_multirect_enabled(c->cap))
+       if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+               test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
                c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
 
        if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@@ -697,10 +697,7 @@ static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
        return ERR_PTR(-ENOMEM);
 }
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-       .start = NULL,
-       .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
                void __iomem *addr, struct dpu_mdss_cfg *catalog,
@@ -708,7 +705,6 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
 {
        struct dpu_hw_pipe *hw_pipe;
        struct dpu_sspp_cfg *cfg;
-       int rc;
 
        if (!addr || !catalog)
                return ERR_PTR(-EINVAL);
@@ -730,18 +726,9 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
        hw_pipe->cap = cfg;
        _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
 
-       rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
-       if (rc) {
-               DPU_ERROR("failed to init hw blk %d\n", rc);
-               goto blk_init_error;
-       }
+       dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
 
        return hw_pipe;
-
-blk_init_error:
-       kzfree(hw_pipe);
-
-       return ERR_PTR(rc);
 }
 
 void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
index 4d81e5f5ce1b45974cb88c4b1fb490f4abbd5b25..119b4e1c16be752dc3e78381f694d46c36a65c4b 100644 (file)
@@ -391,16 +391,6 @@ struct dpu_hw_pipe {
        struct dpu_hw_sspp_ops ops;
 };
 
-/**
- * dpu_hw_pipe - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
-{
-       return container_of(hw, struct dpu_hw_pipe, base);
-}
-
 /**
  * dpu_hw_sspp_init - initializes the sspp hw driver object.
  * Should be called once before accessing every pipe.
index b8781256e21b1028c00005864a3f2c974c29e8f6..a041597bb849f79bb9577dda684a3a6db6e78918 100644 (file)
@@ -13,7 +13,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_top.h"
-#include "dpu_dbg.h"
 #include "dpu_kms.h"
 
 #define SSPP_SPARE                        0x28
@@ -322,10 +321,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
        return ERR_PTR(-EINVAL);
 }
 
-static struct dpu_hw_blk_ops dpu_hw_ops = {
-       .start = NULL,
-       .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
 
 struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
                void __iomem *addr,
@@ -333,7 +329,6 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
 {
        struct dpu_hw_mdp *mdp;
        const struct dpu_mdp_cfg *cfg;
-       int rc;
 
        if (!addr || !m)
                return ERR_PTR(-EINVAL);
@@ -355,20 +350,9 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
        mdp->caps = cfg;
        _setup_mdp_ops(&mdp->ops, mdp->caps->features);
 
-       rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
-       if (rc) {
-               DPU_ERROR("failed to init hw blk %d\n", rc);
-               goto blk_init_error;
-       }
-
-       dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+       dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
 
        return mdp;
-
-blk_init_error:
-       kzfree(mdp);
-
-       return ERR_PTR(rc);
 }
 
 void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
index 192e338f20bbf1c8c8d9b9298d0e602ecfd980d7..aa21fd834398f065cdf76f1b37db50f98de74a90 100644 (file)
@@ -160,16 +160,6 @@ struct dpu_hw_mdp {
        struct dpu_hw_mdp_ops ops;
 };
 
-/**
- * to_dpu_hw_mdp - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
-{
-       return container_of(hw, struct dpu_hw_mdp, base);
-}
-
 /**
  * dpu_hw_mdptop_init - initializes the top driver for the passed idx
  * @idx:  Interface index for which driver object is required
index d43905525f92660179fdd2963023db33a817034e..38bfd222ed72a39c61cf3d4465dff18605755d12 100644 (file)
@@ -13,7 +13,6 @@
 #include "dpu_hwio.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_vbif.h"
-#include "dpu_dbg.h"
 
 #define VBIF_VERSION                   0x0000
 #define VBIF_CLK_FORCE_CTRL0           0x0008
index b557687b1964e49b8fb5eec078d200a5d6ac739e..78833c2c27f850682ce45e743388fd2d744726b7 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 
+#include <drm/drm_print.h>
+
 #include "dpu_io_util.h"
 
 void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
                                                   "clock-names", i,
                                                   &clock_name);
                if (rc) {
-                       dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+                       DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
                                i);
                        break;
                }
@@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,
 
        rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
        if (rc) {
-               dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+               DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
                goto err;
        }
 
        rc = of_clk_set_defaults(pdev->dev.of_node, false);
        if (rc) {
-               dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+               DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
                goto err;
        }
 
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
deleted file mode 100644 (file)
index d5e6ce0..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)    "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_irq.h"
-#include "dpu_core_irq.h"
-
-irqreturn_t dpu_irq(struct msm_kms *kms)
-{
-       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
-       return dpu_core_irq(dpu_kms);
-}
-
-void dpu_irq_preinstall(struct msm_kms *kms)
-{
-       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
-       if (!dpu_kms->dev || !dpu_kms->dev->dev) {
-               pr_err("invalid device handles\n");
-               return;
-       }
-
-       dpu_core_irq_preinstall(dpu_kms);
-}
-
-int dpu_irq_postinstall(struct msm_kms *kms)
-{
-       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-       int rc;
-
-       if (!kms) {
-               DPU_ERROR("invalid parameters\n");
-               return -EINVAL;
-       }
-
-       rc = dpu_core_irq_postinstall(dpu_kms);
-
-       return rc;
-}
-
-void dpu_irq_uninstall(struct msm_kms *kms)
-{
-       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
-       if (!kms) {
-               DPU_ERROR("invalid parameters\n");
-               return;
-       }
-
-       dpu_core_irq_uninstall(dpu_kms);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
deleted file mode 100644 (file)
index 3e147f7..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DPU_IRQ_H__
-#define __DPU_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * dpu_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask:      enable status of MDSS level interrupt
- * @domain:            interrupt domain of this controller
- */
-struct dpu_irq_controller {
-       unsigned long enabled_mask;
-       struct irq_domain *domain;
-};
-
-/**
- * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms:               pointer to kms context
- * @return:            none
- */
-void dpu_irq_preinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms:               pointer to kms context
- * @return:            0 if success; error code otherwise
- */
-int dpu_irq_postinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev:           pointer to kms context
- * @return:            none
- */
-void dpu_irq_uninstall(struct msm_kms *kms);
-
-/**
- * dpu_irq - MDSS level IRQ handler
- * @kms:               pointer to kms context
- * @return:            interrupt handling status
- */
-irqreturn_t dpu_irq(struct msm_kms *kms);
-
-#endif /* __DPU_IRQ_H__ */
index 0a683e65a9f31ccb09286a85dbc9ffc51e4e808b..4d67b3c96702f93c12c1afcfc8d9615a7b5b47d2 100644 (file)
@@ -81,7 +81,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
        struct dpu_danger_safe_status status;
        int i;
 
-       if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+       if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
                DPU_ERROR("invalid arg(s)\n");
                return 0;
        }
@@ -138,46 +138,29 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
 }
 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
 
-static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
-{
-       debugfs_remove_recursive(dpu_kms->debugfs_danger);
-       dpu_kms->debugfs_danger = NULL;
-}
-
-static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
 {
-       dpu_kms->debugfs_danger = debugfs_create_dir("danger",
-                       parent);
-       if (!dpu_kms->debugfs_danger) {
-               DPU_ERROR("failed to create danger debugfs\n");
-               return -EINVAL;
-       }
+       struct dentry *entry = debugfs_create_dir("danger", parent);
+       if (IS_ERR_OR_NULL(entry))
+               return;
 
-       debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+       debugfs_create_file("danger_status", 0600, entry,
                        dpu_kms, &dpu_debugfs_danger_stats_fops);
-       debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+       debugfs_create_file("safe_status", 0600, entry,
                        dpu_kms, &dpu_debugfs_safe_stats_fops);
-
-       return 0;
 }
 
 static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
 {
-       struct dpu_debugfs_regset32 *regset;
-       struct dpu_kms *dpu_kms;
+       struct dpu_debugfs_regset32 *regset = s->private;
+       struct dpu_kms *dpu_kms = regset->dpu_kms;
        struct drm_device *dev;
        struct msm_drm_private *priv;
        void __iomem *base;
        uint32_t i, addr;
 
-       if (!s || !s->private)
-               return 0;
-
-       regset = s->private;
-
-       dpu_kms = regset->dpu_kms;
-       if (!dpu_kms || !dpu_kms->mmio)
+       if (!dpu_kms->mmio)
                return 0;
 
        dev = dpu_kms->dev;
@@ -250,57 +233,24 @@ void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
 
 static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
 {
-       void *p;
-       int rc;
-
-       p = dpu_hw_util_get_log_mask_ptr();
+       void *p = dpu_hw_util_get_log_mask_ptr();
+       struct dentry *entry;
 
-       if (!dpu_kms || !p)
+       if (!p)
                return -EINVAL;
 
-       dpu_kms->debugfs_root = debugfs_create_dir("debug",
-                                          dpu_kms->dev->primary->debugfs_root);
-       if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
-               DRM_ERROR("debugfs create_dir failed %ld\n",
-                         PTR_ERR(dpu_kms->debugfs_root));
-               return PTR_ERR(dpu_kms->debugfs_root);
-       }
-
-       rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
-       if (rc) {
-               DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
-               return rc;
-       }
+       entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
+       if (IS_ERR_OR_NULL(entry))
+               return -ENODEV;
 
        /* allow root to be NULL */
-       debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
-
-       (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
-       (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
-       (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
-
-       rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
-       if (rc) {
-               DPU_ERROR("failed to init perf %d\n", rc);
-               return rc;
-       }
+       debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
 
-       return 0;
-}
+       dpu_debugfs_danger_init(dpu_kms, entry);
+       dpu_debugfs_vbif_init(dpu_kms, entry);
+       dpu_debugfs_core_irq_init(dpu_kms, entry);
 
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
-       /* don't need to NULL check debugfs_root */
-       if (dpu_kms) {
-               dpu_debugfs_vbif_destroy(dpu_kms);
-               dpu_debugfs_danger_destroy(dpu_kms);
-               dpu_debugfs_core_irq_destroy(dpu_kms);
-               debugfs_remove_recursive(dpu_kms->debugfs_root);
-       }
-}
-#else
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
+       return dpu_core_perf_debugfs_init(dpu_kms, entry);
 }
 #endif
 
@@ -320,7 +270,10 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
        struct dpu_kms *dpu_kms;
        struct msm_drm_private *priv;
        struct drm_device *dev;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
        struct drm_encoder *encoder;
+       int i;
 
        if (!kms)
                return;
@@ -332,9 +285,13 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
        priv = dev->dev_private;
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-               if (encoder->crtc != NULL)
+       /* Call prepare_commit for all affected encoders */
+       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+               drm_for_each_encoder_mask(encoder, crtc->dev,
+                                         crtc_state->encoder_mask) {
                        dpu_encoder_prepare_commit(encoder);
+               }
+       }
 }
 
 /*
@@ -344,15 +301,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
 void dpu_kms_encoder_enable(struct drm_encoder *encoder)
 {
        const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
-       struct drm_crtc *crtc = encoder->crtc;
+       struct drm_device *dev = encoder->dev;
+       struct drm_crtc *crtc;
 
        /* Forward this enable call to the commit hook */
        if (funcs && funcs->commit)
                funcs->commit(encoder);
 
-       if (crtc && crtc->state->active) {
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       drm_for_each_crtc(crtc, dev) {
+               if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
+                       continue;
+
                trace_dpu_kms_enc_enable(DRMID(crtc));
-               dpu_crtc_commit_kickoff(crtc);
+               dpu_crtc_commit_kickoff(crtc, false);
        }
 }
 
@@ -369,7 +331,8 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 
                if (crtc->state->active) {
                        trace_dpu_kms_commit(DRMID(crtc));
-                       dpu_crtc_commit_kickoff(crtc);
+                       dpu_crtc_commit_kickoff(crtc,
+                                               state->legacy_cursor_update);
                }
        }
 }
@@ -613,22 +576,7 @@ fail:
 #ifdef CONFIG_DEBUG_FS
 static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
 {
-       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-       struct drm_device *dev;
-       int rc;
-
-       if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
-               DPU_ERROR("invalid dpu_kms\n");
-               return -EINVAL;
-       }
-
-       dev = dpu_kms->dev;
-
-       rc = _dpu_debugfs_init(dpu_kms);
-       if (rc)
-               DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
-
-       return rc;
+       return _dpu_debugfs_init(to_dpu_kms(kms));
 }
 #endif
 
@@ -651,12 +599,7 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
                dpu_hw_intr_destroy(dpu_kms->hw_intr);
        dpu_kms->hw_intr = NULL;
 
-       if (dpu_kms->power_event)
-               dpu_power_handle_unregister_event(
-                               &dpu_kms->phandle, dpu_kms->power_event);
-
        /* safe to call these more than once during shutdown */
-       _dpu_debugfs_destroy(dpu_kms);
        _dpu_kms_mmu_destroy(dpu_kms);
 
        if (dpu_kms->catalog) {
@@ -676,11 +619,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
                dpu_hw_catalog_deinit(dpu_kms->catalog);
        dpu_kms->catalog = NULL;
 
-       if (dpu_kms->core_client)
-               dpu_power_client_destroy(&dpu_kms->phandle,
-                       dpu_kms->core_client);
-       dpu_kms->core_client = NULL;
-
        if (dpu_kms->vbif[VBIF_NRT])
                devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
        dpu_kms->vbif[VBIF_NRT] = NULL;
@@ -705,131 +643,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
 
        dpu_kms = to_dpu_kms(kms);
 
-       dpu_dbg_destroy();
        _dpu_kms_hw_destroy(dpu_kms);
 }
 
-static int dpu_kms_pm_suspend(struct device *dev)
-{
-       struct drm_device *ddev;
-       struct drm_modeset_acquire_ctx ctx;
-       struct drm_atomic_state *state;
-       struct dpu_kms *dpu_kms;
-       int ret = 0, num_crtcs = 0;
-
-       if (!dev)
-               return -EINVAL;
-
-       ddev = dev_get_drvdata(dev);
-       if (!ddev || !ddev_to_msm_kms(ddev))
-               return -EINVAL;
-
-       dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
-       /* disable hot-plug polling */
-       drm_kms_helper_poll_disable(ddev);
-
-       /* acquire modeset lock(s) */
-       drm_modeset_acquire_init(&ctx, 0);
-
-retry:
-       DPU_ATRACE_BEGIN("kms_pm_suspend");
-
-       ret = drm_modeset_lock_all_ctx(ddev, &ctx);
-       if (ret)
-               goto unlock;
-
-       /* save current state for resume */
-       if (dpu_kms->suspend_state)
-               drm_atomic_state_put(dpu_kms->suspend_state);
-       dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
-       if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
-               DRM_ERROR("failed to back up suspend state\n");
-               dpu_kms->suspend_state = NULL;
-               goto unlock;
-       }
-
-       /* create atomic state to disable all CRTCs */
-       state = drm_atomic_state_alloc(ddev);
-       if (IS_ERR_OR_NULL(state)) {
-               DRM_ERROR("failed to allocate crtc disable state\n");
-               goto unlock;
-       }
-
-       state->acquire_ctx = &ctx;
-
-       /* check for nothing to do */
-       if (num_crtcs == 0) {
-               DRM_DEBUG("all crtcs are already in the off state\n");
-               drm_atomic_state_put(state);
-               goto suspended;
-       }
-
-       /* commit the "disable all" state */
-       ret = drm_atomic_commit(state);
-       if (ret < 0) {
-               DRM_ERROR("failed to disable crtcs, %d\n", ret);
-               drm_atomic_state_put(state);
-               goto unlock;
-       }
-
-suspended:
-       dpu_kms->suspend_block = true;
-
-unlock:
-       if (ret == -EDEADLK) {
-               drm_modeset_backoff(&ctx);
-               goto retry;
-       }
-       drm_modeset_drop_locks(&ctx);
-       drm_modeset_acquire_fini(&ctx);
-
-       DPU_ATRACE_END("kms_pm_suspend");
-       return 0;
-}
-
-static int dpu_kms_pm_resume(struct device *dev)
-{
-       struct drm_device *ddev;
-       struct dpu_kms *dpu_kms;
-       int ret;
-
-       if (!dev)
-               return -EINVAL;
-
-       ddev = dev_get_drvdata(dev);
-       if (!ddev || !ddev_to_msm_kms(ddev))
-               return -EINVAL;
-
-       dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
-       DPU_ATRACE_BEGIN("kms_pm_resume");
-
-       drm_mode_config_reset(ddev);
-
-       drm_modeset_lock_all(ddev);
-
-       dpu_kms->suspend_block = false;
-
-       if (dpu_kms->suspend_state) {
-               dpu_kms->suspend_state->acquire_ctx =
-                       ddev->mode_config.acquire_ctx;
-               ret = drm_atomic_commit(dpu_kms->suspend_state);
-               if (ret < 0) {
-                       DRM_ERROR("failed to restore state, %d\n", ret);
-                       drm_atomic_state_put(dpu_kms->suspend_state);
-               }
-               dpu_kms->suspend_state = NULL;
-       }
-       drm_modeset_unlock_all(ddev);
-
-       /* enable hot-plug polling */
-       drm_kms_helper_poll_enable(ddev);
-
-       DPU_ATRACE_END("kms_pm_resume");
-       return 0;
-}
-
 static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
                                 struct drm_encoder *encoder,
                                 bool cmd_mode)
@@ -858,10 +674,30 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
                        encoder->base.id, rc);
 }
 
+static irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       return dpu_core_irq(dpu_kms);
+}
+
+static void dpu_irq_preinstall(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       dpu_core_irq_preinstall(dpu_kms);
+}
+
+static void dpu_irq_uninstall(struct msm_kms *kms)
+{
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       dpu_core_irq_uninstall(dpu_kms);
+}
+
 static const struct msm_kms_funcs kms_funcs = {
        .hw_init         = dpu_kms_hw_init,
        .irq_preinstall  = dpu_irq_preinstall,
-       .irq_postinstall = dpu_irq_postinstall,
        .irq_uninstall   = dpu_irq_uninstall,
        .irq             = dpu_irq,
        .prepare_commit  = dpu_kms_prepare_commit,
@@ -873,8 +709,6 @@ static const struct msm_kms_funcs kms_funcs = {
        .check_modified_format = dpu_format_check_modified_format,
        .get_format      = dpu_get_msm_format,
        .round_pixclk    = dpu_kms_round_pixclk,
-       .pm_suspend      = dpu_kms_pm_suspend,
-       .pm_resume       = dpu_kms_pm_resume,
        .destroy         = dpu_kms_destroy,
        .set_encoder_mode = _dpu_kms_set_encoder_mode,
 #ifdef CONFIG_DEBUG_FS
@@ -882,12 +716,6 @@ static const struct msm_kms_funcs kms_funcs = {
 #endif
 };
 
-/* the caller api needs to turn on clock before calling it */
-static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
-{
-       dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
-}
-
 static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
 {
        struct msm_mmu *mmu;
@@ -911,6 +739,9 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
        if (!domain)
                return 0;
 
+       domain->geometry.aperture_start = 0x1000;
+       domain->geometry.aperture_end = 0xffffffff;
+
        aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
                        domain, "dpu1");
        if (IS_ERR(aspace)) {
@@ -960,16 +791,6 @@ u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
        return clk_get_rate(clk->clk);
 }
 
-static void dpu_kms_handle_power_event(u32 event_type, void *usr)
-{
-       struct dpu_kms *dpu_kms = usr;
-
-       if (!dpu_kms)
-               return;
-
-       dpu_vbif_init_memtypes(dpu_kms);
-}
-
 static int dpu_kms_hw_init(struct msm_kms *kms)
 {
        struct dpu_kms *dpu_kms;
@@ -979,26 +800,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
        if (!kms) {
                DPU_ERROR("invalid kms\n");
-               goto end;
+               return rc;
        }
 
        dpu_kms = to_dpu_kms(kms);
        dev = dpu_kms->dev;
        if (!dev) {
                DPU_ERROR("invalid device\n");
-               goto end;
-       }
-
-       rc = dpu_dbg_init(&dpu_kms->pdev->dev);
-       if (rc) {
-               DRM_ERROR("failed to init dpu dbg: %d\n", rc);
-               goto end;
+               return rc;
        }
 
        priv = dev->dev_private;
        if (!priv) {
                DPU_ERROR("invalid private data\n");
-               goto dbg_destroy;
+               return rc;
        }
 
        dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
@@ -1036,20 +851,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
                dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
        }
 
-       dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
-                                       "core");
-       if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
-               rc = PTR_ERR(dpu_kms->core_client);
-               if (!dpu_kms->core_client)
-                       rc = -EINVAL;
-               DPU_ERROR("dpu power client create failed: %d\n", rc);
-               dpu_kms->core_client = NULL;
-               goto error;
-       }
-
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 
-       _dpu_kms_core_hw_rev_init(dpu_kms);
+       dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
 
        pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
 
@@ -1063,8 +867,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
                goto power_error;
        }
 
-       dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
-
        /*
         * Now we need to read the HW catalog and initialize resources such as
         * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@@ -1110,7 +912,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
        }
 
        rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
-                       &dpu_kms->phandle,
                        _dpu_kms_get_clk(dpu_kms, "core"));
        if (rc) {
                DPU_ERROR("failed to init perf %d\n", rc);
@@ -1151,13 +952,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
         */
        dev->mode_config.allow_fb_modifiers = true;
 
-       /*
-        * Handle (re)initializations during power enable
-        */
-       dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
-       dpu_kms->power_event = dpu_power_handle_register_event(
-                       &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
-                       dpu_kms_handle_power_event, dpu_kms, "kms");
+       dpu_vbif_init_memtypes(dpu_kms);
 
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
@@ -1171,9 +966,7 @@ power_error:
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 error:
        _dpu_kms_hw_destroy(dpu_kms);
-dbg_destroy:
-       dpu_dbg_destroy();
-end:
+
        return rc;
 }
 
@@ -1221,8 +1014,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
-       dpu_power_resource_init(pdev, &dpu_kms->phandle);
-
        platform_set_drvdata(pdev, dpu_kms);
 
        msm_kms_init(&dpu_kms->base, &kms_funcs);
@@ -1242,7 +1033,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct dss_module_power *mp = &dpu_kms->mp;
 
-       dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
        msm_dss_put_clk(mp->clk_config, mp->num_clk);
        devm_kfree(&pdev->dev, mp->clk_config);
        mp->num_clk = 0;
@@ -1278,19 +1068,13 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
        ddev = dpu_kms->dev;
        if (!ddev) {
                DPU_ERROR("invalid drm_device\n");
-               goto exit;
+               return rc;
        }
 
-       rc = dpu_power_resource_enable(&dpu_kms->phandle,
-                       dpu_kms->core_client, false);
-       if (rc)
-               DPU_ERROR("resource disable failed: %d\n", rc);
-
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
        if (rc)
                DPU_ERROR("clock disable failed rc:%d\n", rc);
 
-exit:
        return rc;
 }
 
@@ -1299,27 +1083,27 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
        int rc = -1;
        struct platform_device *pdev = to_platform_device(dev);
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+       struct drm_encoder *encoder;
        struct drm_device *ddev;
        struct dss_module_power *mp = &dpu_kms->mp;
 
        ddev = dpu_kms->dev;
        if (!ddev) {
                DPU_ERROR("invalid drm_device\n");
-               goto exit;
+               return rc;
        }
 
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc) {
                DPU_ERROR("clock enable failed rc:%d\n", rc);
-               goto exit;
+               return rc;
        }
 
-       rc = dpu_power_resource_enable(&dpu_kms->phandle,
-                       dpu_kms->core_client, true);
-       if (rc)
-               DPU_ERROR("resource enable failed: %d\n", rc);
+       dpu_vbif_init_memtypes(dpu_kms);
+
+       drm_for_each_encoder(encoder, ddev)
+               dpu_encoder_virt_runtime_resume(encoder);
 
-exit:
        return rc;
 }
 
index 66d466628e2b90de86ba6d331678efcdedba5c77..ac75cfc267f40fe071db92554de0db7885b4b0c5 100644 (file)
 #include "msm_kms.h"
 #include "msm_mmu.h"
 #include "msm_gem.h"
-#include "dpu_dbg.h"
 #include "dpu_hw_catalog.h"
 #include "dpu_hw_ctl.h"
 #include "dpu_hw_lm.h"
 #include "dpu_hw_interrupts.h"
 #include "dpu_hw_top.h"
+#include "dpu_io_util.h"
 #include "dpu_rm.h"
-#include "dpu_power_handle.h"
-#include "dpu_irq.h"
 #include "dpu_core_perf.h"
 
 #define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -104,7 +102,6 @@ struct dpu_irq {
        atomic_t *enable_counts;
        atomic_t *irq_counts;
        spinlock_t cb_lock;
-       struct dentry *debugfs_file;
 };
 
 struct dpu_kms {
@@ -113,15 +110,6 @@ struct dpu_kms {
        int core_rev;
        struct dpu_mdss_cfg *catalog;
 
-       struct dpu_power_handle phandle;
-       struct dpu_power_client *core_client;
-       struct dpu_power_event *power_event;
-
-       /* directory entry for debugfs */
-       struct dentry *debugfs_root;
-       struct dentry *debugfs_danger;
-       struct dentry *debugfs_vbif;
-
        /* io/register spaces: */
        void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
        unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@@ -135,10 +123,6 @@ struct dpu_kms {
 
        struct dpu_core_perf perf;
 
-       /* saved atomic state during system suspend */
-       struct drm_atomic_state *suspend_state;
-       bool suspend_block;
-
        struct dpu_rm rm;
        bool rm_init;
 
@@ -163,33 +147,6 @@ struct vsync_info {
 #define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
                ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
 
-/**
- * dpu_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
-{
-       if (!ddev_to_msm_kms(dev))
-               return false;
-
-       return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- *                             suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
-{
-       if (!dpu_kms_is_suspend_state(dev))
-               return false;
-
-       return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
 /**
  * Debugfs functions - extra helper functions for debugfs support
  *
index 2235ef8129f4b02e85e0779ef52429fbf3f29836..cb307a2abf06c6136976f861e28525a244a34882 100644 (file)
@@ -9,6 +9,11 @@
 
 #define HW_INTR_STATUS                 0x0010
 
+struct dpu_irq_controller {
+       unsigned long enabled_mask;
+       struct irq_domain *domain;
+};
+
 struct dpu_mdss {
        struct msm_mdss base;
        void __iomem *mmio;
@@ -115,13 +120,12 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
        return 0;
 }
 
-static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
 {
        if (dpu_mdss->irq_controller.domain) {
                irq_domain_remove(dpu_mdss->irq_controller.domain);
                dpu_mdss->irq_controller.domain = NULL;
        }
-       return 0;
 }
 static int dpu_mdss_enable(struct msm_mdss *mdss)
 {
@@ -156,18 +160,16 @@ static void dpu_mdss_destroy(struct drm_device *dev)
        struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
        struct dss_module_power *mp = &dpu_mdss->mp;
 
+       pm_runtime_suspend(dev->dev);
+       pm_runtime_disable(dev->dev);
        _dpu_mdss_irq_domain_fini(dpu_mdss);
-
        free_irq(platform_get_irq(pdev, 0), dpu_mdss);
-
        msm_dss_put_clk(mp->clk_config, mp->num_clk);
        devm_kfree(&pdev->dev, mp->clk_config);
 
        if (dpu_mdss->mmio)
                devm_iounmap(&pdev->dev, dpu_mdss->mmio);
        dpu_mdss->mmio = NULL;
-
-       pm_runtime_disable(dev->dev);
        priv->mdss = NULL;
 }
 
index d77a8cb1540484157f75113fbfae5ba7dbba0eb9..fd75870eb17f7c7d5e8f8446f526b19715042b03 100644 (file)
@@ -137,7 +137,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
  * @src_wdith:         width of source buffer
  * Return: fill level corresponding to the source buffer/format or 0 if error
  */
-static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
                const struct dpu_format *fmt, u32 src_width)
 {
        struct dpu_plane *pdpu, *tmp;
@@ -430,24 +430,14 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
        dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
 }
 
-/**
- * _dpu_plane_get_aspace: gets the address space
- */
-static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
-               struct dpu_plane *pdpu)
-{
-       struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
-
-       return kms->base.aspace;
-}
-
-static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
                struct dpu_plane_state *pstate,
                struct dpu_hw_pipe_cfg *pipe_cfg,
                struct drm_framebuffer *fb)
 {
        struct dpu_plane *pdpu = to_dpu_plane(plane);
-       struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+       struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+       struct msm_gem_address_space *aspace = kms->base.aspace;
        int ret;
 
        ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
@@ -525,7 +515,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
        scale_cfg->enable = 1;
 }
 
-static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
 {
        static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
                {
@@ -801,7 +791,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
        struct drm_gem_object *obj;
        struct msm_gem_object *msm_obj;
        struct dma_fence *fence;
-       struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+       struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
        int ret;
 
        if (!new_state->fb)
@@ -810,7 +800,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
        DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
 
        /* cache aspace */
-       pstate->aspace = aspace;
+       pstate->aspace = kms->base.aspace;
 
        /*
         * TODO: Need to sort out the msm_framebuffer_prepare() call below so
@@ -1191,19 +1181,8 @@ static void dpu_plane_destroy(struct drm_plane *plane)
 static void dpu_plane_destroy_state(struct drm_plane *plane,
                struct drm_plane_state *state)
 {
-       struct dpu_plane_state *pstate;
-
-       if (!plane || !state) {
-               DPU_ERROR("invalid arg(s), plane %d state %d\n",
-                               plane != 0, state != 0);
-               return;
-       }
-
-       pstate = to_dpu_plane_state(state);
-
        __drm_atomic_helper_plane_destroy_state(state);
-
-       kfree(pstate);
+       kfree(to_dpu_plane_state(state));
 }
 
 static struct drm_plane_state *
@@ -1273,26 +1252,12 @@ static ssize_t _dpu_plane_danger_read(struct file *file,
                        char __user *buff, size_t count, loff_t *ppos)
 {
        struct dpu_kms *kms = file->private_data;
-       struct dpu_mdss_cfg *cfg = kms->catalog;
-       int len = 0;
-       char buf[40] = {'\0'};
-
-       if (!cfg)
-               return -ENODEV;
+       int len;
+       char buf[40];
 
-       if (*ppos)
-               return 0; /* the end */
-
-       len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
-       if (len < 0 || len >= sizeof(buf))
-               return 0;
+       len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
 
-       if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
-               return -EFAULT;
-
-       *ppos += len;   /* increase offset */
-
-       return len;
+       return simple_read_from_buffer(buff, count, ppos, buf, len);
 }
 
 static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@@ -1322,23 +1287,12 @@ static ssize_t _dpu_plane_danger_write(struct file *file,
                    const char __user *user_buf, size_t count, loff_t *ppos)
 {
        struct dpu_kms *kms = file->private_data;
-       struct dpu_mdss_cfg *cfg = kms->catalog;
        int disable_panic;
-       char buf[10];
-
-       if (!cfg)
-               return -EFAULT;
-
-       if (count >= sizeof(buf))
-               return -EFAULT;
-
-       if (copy_from_user(buf, user_buf, count))
-               return -EFAULT;
-
-       buf[count] = 0; /* end of string */
+       int ret;
 
-       if (kstrtoint(buf, 0, &disable_panic))
-               return -EFAULT;
+       ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+       if (ret)
+               return ret;
 
        if (disable_panic) {
                /* Disable panic signal for all active pipes */
@@ -1363,33 +1317,10 @@ static const struct file_operations dpu_plane_danger_enable = {
 
 static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 {
-       struct dpu_plane *pdpu;
-       struct dpu_kms *kms;
-       struct msm_drm_private *priv;
-       const struct dpu_sspp_sub_blks *sblk = 0;
-       const struct dpu_sspp_cfg *cfg = 0;
-
-       if (!plane || !plane->dev) {
-               DPU_ERROR("invalid arguments\n");
-               return -EINVAL;
-       }
-
-       priv = plane->dev->dev_private;
-       if (!priv || !priv->kms) {
-               DPU_ERROR("invalid KMS reference\n");
-               return -EINVAL;
-       }
-
-       kms = to_dpu_kms(priv->kms);
-       pdpu = to_dpu_plane(plane);
-
-       if (pdpu && pdpu->pipe_hw)
-               cfg = pdpu->pipe_hw->cap;
-       if (cfg)
-               sblk = cfg->sblk;
-
-       if (!sblk)
-               return 0;
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_kms *kms = _dpu_plane_get_kms(plane);
+       const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
+       const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
 
        /* create overall sub-directory for the pipe */
        pdpu->debugfs_root =
@@ -1460,25 +1391,11 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 
        return 0;
 }
-
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-       struct dpu_plane *pdpu;
-
-       if (!plane)
-               return;
-       pdpu = to_dpu_plane(plane);
-
-       debugfs_remove_recursive(pdpu->debugfs_root);
-}
 #else
 static int _dpu_plane_init_debugfs(struct drm_plane *plane)
 {
        return 0;
 }
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
 #endif
 
 static int dpu_plane_late_register(struct drm_plane *plane)
@@ -1488,7 +1405,9 @@ static int dpu_plane_late_register(struct drm_plane *plane)
 
 static void dpu_plane_early_unregister(struct drm_plane *plane)
 {
-       _dpu_plane_destroy_debugfs(plane);
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+       debugfs_remove_recursive(pdpu->debugfs_root);
 }
 
 static const struct drm_plane_funcs dpu_plane_funcs = {
@@ -1537,7 +1456,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
        if (!pdpu) {
                DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
                ret = -ENOMEM;
-               goto exit;
+               return ERR_PTR(ret);
        }
 
        /* cache local stuff for later */
@@ -1623,6 +1542,5 @@ clean_sspp:
                dpu_hw_sspp_destroy(pdpu->pipe_hw);
 clean_plane:
        kfree(pdpu);
-exit:
        return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
deleted file mode 100644 (file)
index fc14116..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt)    "[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include "dpu_power_handle.h"
-#include "dpu_trace.h"
-
-static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
-       [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
-       [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
-       [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
-};
-
-const char *dpu_power_handle_get_dbus_name(u32 bus_id)
-{
-       if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
-               return data_bus_name[bus_id];
-
-       return NULL;
-}
-
-static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
-               u32 event_type)
-{
-       struct dpu_power_event *event;
-
-       list_for_each_entry(event, &phandle->event_list, list) {
-               if (event->event_type & event_type)
-                       event->cb_fnc(event_type, event->usr);
-       }
-}
-
-struct dpu_power_client *dpu_power_client_create(
-       struct dpu_power_handle *phandle, char *client_name)
-{
-       struct dpu_power_client *client;
-       static u32 id;
-
-       if (!client_name || !phandle) {
-               pr_err("client name is null or invalid power data\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
-       if (!client)
-               return ERR_PTR(-ENOMEM);
-
-       mutex_lock(&phandle->phandle_lock);
-       strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
-       client->usecase_ndx = VOTE_INDEX_DISABLE;
-       client->id = id;
-       client->active = true;
-       pr_debug("client %s created:%pK id :%d\n", client_name,
-               client, id);
-       id++;
-       list_add(&client->list, &phandle->power_client_clist);
-       mutex_unlock(&phandle->phandle_lock);
-
-       return client;
-}
-
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
-       struct dpu_power_client *client)
-{
-       if (!client  || !phandle) {
-               pr_err("reg bus vote: invalid client handle\n");
-       } else if (!client->active) {
-               pr_err("dpu power deinit already done\n");
-               kfree(client);
-       } else {
-               pr_debug("bus vote client %s destroyed:%pK id:%u\n",
-                       client->name, client, client->id);
-               mutex_lock(&phandle->phandle_lock);
-               list_del_init(&client->list);
-               mutex_unlock(&phandle->phandle_lock);
-               kfree(client);
-       }
-}
-
-void dpu_power_resource_init(struct platform_device *pdev,
-       struct dpu_power_handle *phandle)
-{
-       phandle->dev = &pdev->dev;
-
-       INIT_LIST_HEAD(&phandle->power_client_clist);
-       INIT_LIST_HEAD(&phandle->event_list);
-
-       mutex_init(&phandle->phandle_lock);
-}
-
-void dpu_power_resource_deinit(struct platform_device *pdev,
-       struct dpu_power_handle *phandle)
-{
-       struct dpu_power_client *curr_client, *next_client;
-       struct dpu_power_event *curr_event, *next_event;
-
-       if (!phandle || !pdev) {
-               pr_err("invalid input param\n");
-               return;
-       }
-
-       mutex_lock(&phandle->phandle_lock);
-       list_for_each_entry_safe(curr_client, next_client,
-                       &phandle->power_client_clist, list) {
-               pr_err("client:%s-%d still registered with refcount:%d\n",
-                               curr_client->name, curr_client->id,
-                               curr_client->refcount);
-               curr_client->active = false;
-               list_del(&curr_client->list);
-       }
-
-       list_for_each_entry_safe(curr_event, next_event,
-                       &phandle->event_list, list) {
-               pr_err("event:%d, client:%s still registered\n",
-                               curr_event->event_type,
-                               curr_event->client_name);
-               curr_event->active = false;
-               list_del(&curr_event->list);
-       }
-       mutex_unlock(&phandle->phandle_lock);
-}
-
-int dpu_power_resource_enable(struct dpu_power_handle *phandle,
-       struct dpu_power_client *pclient, bool enable)
-{
-       bool changed = false;
-       u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
-       struct dpu_power_client *client;
-       u32 event_type;
-
-       if (!phandle || !pclient) {
-               pr_err("invalid input argument\n");
-               return -EINVAL;
-       }
-
-       mutex_lock(&phandle->phandle_lock);
-       if (enable)
-               pclient->refcount++;
-       else if (pclient->refcount)
-               pclient->refcount--;
-
-       if (pclient->refcount)
-               pclient->usecase_ndx = VOTE_INDEX_LOW;
-       else
-               pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
-       list_for_each_entry(client, &phandle->power_client_clist, list) {
-               if (client->usecase_ndx < VOTE_INDEX_MAX &&
-                   client->usecase_ndx > max_usecase_ndx)
-                       max_usecase_ndx = client->usecase_ndx;
-       }
-
-       if (phandle->current_usecase_ndx != max_usecase_ndx) {
-               changed = true;
-               prev_usecase_ndx = phandle->current_usecase_ndx;
-               phandle->current_usecase_ndx = max_usecase_ndx;
-       }
-
-       pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
-               __builtin_return_address(0), changed, max_usecase_ndx,
-               pclient->name, pclient->id, enable, pclient->refcount);
-
-       if (!changed)
-               goto end;
-
-       event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
-
-       dpu_power_event_trigger_locked(phandle, event_type);
-end:
-       mutex_unlock(&phandle->phandle_lock);
-       return 0;
-}
-
-struct dpu_power_event *dpu_power_handle_register_event(
-               struct dpu_power_handle *phandle,
-               u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
-               void *usr, char *client_name)
-{
-       struct dpu_power_event *event;
-
-       if (!phandle) {
-               pr_err("invalid power handle\n");
-               return ERR_PTR(-EINVAL);
-       } else if (!cb_fnc || !event_type) {
-               pr_err("no callback fnc or event type\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
-       if (!event)
-               return ERR_PTR(-ENOMEM);
-
-       event->event_type = event_type;
-       event->cb_fnc = cb_fnc;
-       event->usr = usr;
-       strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
-       event->active = true;
-
-       mutex_lock(&phandle->phandle_lock);
-       list_add(&event->list, &phandle->event_list);
-       mutex_unlock(&phandle->phandle_lock);
-
-       return event;
-}
-
-void dpu_power_handle_unregister_event(
-               struct dpu_power_handle *phandle,
-               struct dpu_power_event *event)
-{
-       if (!phandle || !event) {
-               pr_err("invalid phandle or event\n");
-       } else if (!event->active) {
-               pr_err("power handle deinit already done\n");
-               kfree(event);
-       } else {
-               mutex_lock(&phandle->phandle_lock);
-               list_del_init(&event->list);
-               mutex_unlock(&phandle->phandle_lock);
-               kfree(event);
-       }
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
deleted file mode 100644 (file)
index a65b7a2..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DPU_POWER_HANDLE_H_
-#define _DPU_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA   0
-#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA  0
-#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA   1600000000
-#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA  0
-
-#include "dpu_io_util.h"
-
-/* events will be triggered on power handler enable/disable */
-#define DPU_POWER_EVENT_DISABLE        BIT(0)
-#define DPU_POWER_EVENT_ENABLE BIT(1)
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
-       VOTE_INDEX_DISABLE,
-       VOTE_INDEX_LOW,
-       VOTE_INDEX_MAX,
-};
-
-/**
- * enum dpu_power_handle_data_bus_client - type of axi bus clients
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum dpu_power_handle_data_bus_client {
-       DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
-       DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
-       DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
- * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum DPU_POWER_HANDLE_DBUS_ID {
-       DPU_POWER_HANDLE_DBUS_ID_MNOC,
-       DPU_POWER_HANDLE_DBUS_ID_LLCC,
-       DPU_POWER_HANDLE_DBUS_ID_EBI,
-       DPU_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct dpu_power_client: stores the power client for dpu driver
- * @name:      name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount:  current refcount if multiple modules are using same
- *              same client for enable/disable. Power module will
- *              aggregate the refcount and vote accordingly for this
- *              client.
- * @id:                assigned during create. helps for debugging.
- * @list:      list to attach power handle master list
- * @ab:         arbitrated bandwidth for each bus client
- * @ib:         instantaneous bandwidth for each bus client
- * @active:    inidcates the state of dpu power handle
- */
-struct dpu_power_client {
-       char name[MAX_CLIENT_NAME_LEN];
-       short usecase_ndx;
-       short refcount;
-       u32 id;
-       struct list_head list;
-       u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
-       u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
-       bool active;
-};
-
-/*
- * struct dpu_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event: refer to DPU_POWER_HANDLE_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_event {
-       char client_name[MAX_CLIENT_NAME_LEN];
-       void (*cb_fnc)(u32 event_type, void *usr);
-       void *usr;
-       u32 event_type;
-       struct list_head list;
-       bool active;
-};
-
-/**
- * struct dpu_power_handle: power handle main struct
- * @client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @usecase_ndx: current usecase index
- * @event_list: current power handle event list
- */
-struct dpu_power_handle {
-       struct list_head power_client_clist;
-       struct mutex phandle_lock;
-       struct device *dev;
-       u32 current_usecase_ndx;
-       struct list_head event_list;
-};
-
-/**
- * dpu_power_resource_init() - initializes the dpu power handle
- * @pdev:   platform device to search the power resources
- * @pdata:  power handle to store the power resources
- */
-void dpu_power_resource_init(struct platform_device *pdev,
-       struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_resource_deinit() - release the dpu power handle
- * @pdev:   platform device for power resources
- * @pdata:  power handle containing the resources
- *
- * Return: error code.
- */
-void dpu_power_resource_deinit(struct platform_device *pdev,
-       struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_client_create() - create the client on power handle
- * @pdata:  power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: error code.
- */
-struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
-       char *client_name);
-
-/**
- * dpu_power_client_destroy() - destroy the client on power handle
- * @pdata:  power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: none
- */
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
-       struct dpu_power_client *client);
-
-/**
- * dpu_power_resource_enable() - enable/disable the power resources
- * @pdata:  power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int dpu_power_resource_enable(struct dpu_power_handle *pdata,
-       struct dpu_power_client *pclient, bool enable);
-
-/**
- * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle:  power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for data base
- *
- * Return: none
- */
-void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
-               struct dpu_power_client *pclient, int enable);
-
-/**
- * dpu_power_handle_register_event - register a callback function for an event.
- *     Clients can register for multiple events with a single register.
- *     Any block with access to phandle can register for the event
- *     notification.
- * @phandle:   power handle containing the resources
- * @event_type:        event type to register; refer DPU_POWER_HANDLE_EVENT_*
- * @cb_fnc:    pointer to desired callback function
- * @usr:       user pointer to pass to callback on event trigger
- *
- * Return:     event pointer if success, or error code otherwise
- */
-struct dpu_power_event *dpu_power_handle_register_event(
-               struct dpu_power_handle *phandle,
-               u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
-               void *usr, char *client_name);
-/**
- * dpu_power_handle_unregister_event - unregister callback for event(s)
- * @phandle:   power handle containing the resources
- * @event:     event pointer returned after power handle register
- */
-void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
-               struct dpu_power_event *event);
-
-/**
- * dpu_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id:    data bus identifier
- * Return:     Pointer to name string if success; NULL otherwise
- */
-const char *dpu_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _DPU_POWER_HANDLE_H_ */
index e12c4cefb7427047b582b7d0e87b8a1ab6c11aa4..c78b521ceda1bd06f7e06635ff86e3eaab483186 100644 (file)
@@ -99,27 +99,6 @@ TRACE_EVENT(dpu_perf_set_ot,
                        __entry->vbif_idx)
 )
 
-TRACE_EVENT(dpu_perf_update_bus,
-       TP_PROTO(int client, unsigned long long ab_quota,
-       unsigned long long ib_quota),
-       TP_ARGS(client, ab_quota, ib_quota),
-       TP_STRUCT__entry(
-                       __field(int, client)
-                       __field(u64, ab_quota)
-                       __field(u64, ib_quota)
-       ),
-       TP_fast_assign(
-                       __entry->client = client;
-                       __entry->ab_quota = ab_quota;
-                       __entry->ib_quota = ib_quota;
-       ),
-       TP_printk("Request client:%d ab=%llu ib=%llu",
-                       __entry->client,
-                       __entry->ab_quota,
-                       __entry->ib_quota)
-)
-
-
 TRACE_EVENT(dpu_cmd_release_bw,
        TP_PROTO(u32 crtc_id),
        TP_ARGS(crtc_id),
@@ -319,6 +298,10 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
        TP_PROTO(uint32_t drm_id),
        TP_ARGS(drm_id)
 );
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+       TP_PROTO(uint32_t drm_id),
+       TP_ARGS(drm_id)
+);
 
 TRACE_EVENT(dpu_enc_enable,
        TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -539,10 +522,6 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
        TP_PROTO(uint32_t drm_id, u32 event),
        TP_ARGS(drm_id, event)
 );
-DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
-       TP_PROTO(uint32_t drm_id, u32 event),
-       TP_ARGS(drm_id, event)
-);
 DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
        TP_PROTO(uint32_t drm_id, u32 event),
        TP_ARGS(drm_id, event)
@@ -749,24 +728,17 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
                __field(        uint32_t,               enc_id  )
                __field(        bool,                   enable  )
                __field(        bool,                   enabled )
-               __field(        bool,                   suspend )
-               __field(        bool,                   vblank_requested )
        ),
        TP_fast_assign(
                __entry->drm_id = drm_id;
                __entry->enc_id = enc_id;
                __entry->enable = enable;
                __entry->enabled = crtc->enabled;
-               __entry->suspend = crtc->suspend;
-               __entry->vblank_requested = crtc->vblank_requested;
        ),
-       TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
-                 "vblank_req:%s}",
+       TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
                  __entry->drm_id, __entry->enc_id,
                  __entry->enable ? "true" : "false",
-                 __entry->enabled ? "true" : "false",
-                 __entry->suspend ? "true" : "false",
-                 __entry->vblank_requested ? "true" : "false")
+                 __entry->enabled ? "true" : "false")
 );
 
 DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -776,25 +748,15 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
                __field(        uint32_t,               drm_id  )
                __field(        bool,                   enable  )
                __field(        bool,                   enabled )
-               __field(        bool,                   suspend )
-               __field(        bool,                   vblank_requested )
        ),
        TP_fast_assign(
                __entry->drm_id = drm_id;
                __entry->enable = enable;
                __entry->enabled = crtc->enabled;
-               __entry->suspend = crtc->suspend;
-               __entry->vblank_requested = crtc->vblank_requested;
        ),
-       TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+       TP_printk("id:%u enable:%s state{enabled:%s}",
                  __entry->drm_id, __entry->enable ? "true" : "false",
-                 __entry->enabled ? "true" : "false",
-                 __entry->suspend ? "true" : "false",
-                 __entry->vblank_requested ? "true" : "false")
-);
-DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
-       TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
-       TP_ARGS(drm_id, enable, crtc)
+                 __entry->enabled ? "true" : "false")
 );
 DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
        TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -1004,6 +966,53 @@ TRACE_EVENT(dpu_core_perf_update_clk,
                  __entry->stop_req ? "true" : "false", __entry->clk_rate)
 );
 
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+       TP_PROTO(u32 new_bits, u32 pending_mask),
+       TP_ARGS(new_bits, pending_mask),
+       TP_STRUCT__entry(
+               __field(        u32,                    new_bits        )
+               __field(        u32,                    pending_mask    )
+       ),
+       TP_fast_assign(
+               __entry->new_bits = new_bits;
+               __entry->pending_mask = pending_mask;
+       ),
+       TP_printk("new=%x existing=%x", __entry->new_bits,
+                 __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+       TP_PROTO(u32 pending_mask, u32 ctl_flush),
+       TP_ARGS(pending_mask, ctl_flush),
+       TP_STRUCT__entry(
+               __field(        u32,                    pending_mask    )
+               __field(        u32,                    ctl_flush       )
+       ),
+       TP_fast_assign(
+               __entry->pending_mask = pending_mask;
+               __entry->ctl_flush = ctl_flush;
+       ),
+       TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+                 __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+       TP_PROTO(u32 pending_mask, u32 ctl_flush),
+       TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+            dpu_hw_ctl_trigger_pending_flush,
+       TP_PROTO(u32 pending_mask, u32 ctl_flush),
+       TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+       TP_PROTO(u32 pending_mask, u32 ctl_flush),
+       TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+       TP_PROTO(u32 pending_mask, u32 ctl_flush),
+       TP_ARGS(pending_mask, ctl_flush)
+);
+
 #define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
 #define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
 #define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
index 2955282922964deb660171efc381ed932983a8d9..ef753ea9c4999a0341b35391aeb46a37f4858dff 100644 (file)
@@ -191,7 +191,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
        ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
 
        if (ot_lim == 0)
-               goto exit;
+               return;
 
        trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
                params->vbif_idx);
@@ -210,8 +210,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
 
        if (forced_on)
                mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
-       return;
 }
 
 void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@@ -312,31 +310,25 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
 }
 
 #ifdef CONFIG_DEBUG_FS
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-       debugfs_remove_recursive(dpu_kms->debugfs_vbif);
-       dpu_kms->debugfs_vbif = NULL;
-}
 
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
 {
        char vbif_name[32];
-       struct dentry *debugfs_vbif;
+       struct dentry *entry, *debugfs_vbif;
        int i, j;
 
-       dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
-       if (!dpu_kms->debugfs_vbif) {
-               DPU_ERROR("failed to create vbif debugfs\n");
-               return -EINVAL;
-       }
+       entry = debugfs_create_dir("vbif", debugfs_root);
+       if (IS_ERR_OR_NULL(entry))
+               return;
 
        for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
                struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
 
                snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
 
-               debugfs_vbif = debugfs_create_dir(vbif_name,
-                               dpu_kms->debugfs_vbif);
+               debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+               if (IS_ERR_OR_NULL(debugfs_vbif))
+                       continue;
 
                debugfs_create_u32("features", 0600, debugfs_vbif,
                        (u32 *)&vbif->features);
@@ -378,7 +370,5 @@ int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
                                        (u32 *)&cfg->ot_limit);
                }
        }
-
-       return 0;
 }
 #endif
index f17af52dbbd58e14b1a87c94eec6132e1fc7d277..6356876d7a66d8833ea66b36a7a5340f8eaa4baf 100644 (file)
@@ -78,17 +78,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
  */
 void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
 
-#ifdef CONFIG_DEBUG_FS
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
-#else
-static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
-               struct dentry *debugfs_root)
-{
-       return 0;
-}
-static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-}
-#endif
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
 #endif /* __DPU_VBIF_H__ */
index 4f12e5c534c8c4d7cfc389b6174133449d613183..9fc9dbde8a27c1d7078c3d6b260c8635f27a514f 100644 (file)
@@ -813,18 +813,6 @@ enum color_fmts {
 #define COLOR_FMT_P010_UBWC            COLOR_FMT_P010_UBWC
 #define COLOR_FMT_P010         COLOR_FMT_P010
 
-static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
-{
-       (void)height;
-       (void)width;
-
-       /*
-        * In the future, calculate the size based on the w/h but just
-        * hardcode it for now since 16K satisfies all current usecases.
-        */
-       return 16 * 1024;
-}
-
 /*
  * Function arguments:
  * @color_fmt
@@ -832,38 +820,32 @@ static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
 {
-       unsigned int alignment, stride = 0;
+       unsigned int stride = 0;
 
        if (!width)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV21:
        case COLOR_FMT_NV12:
        case COLOR_FMT_NV12_MVTB:
        case COLOR_FMT_NV12_UBWC:
-               alignment = 128;
-               stride = MSM_MEDIA_ALIGN(width, alignment);
+               stride = MSM_MEDIA_ALIGN(width, 128);
                break;
        case COLOR_FMT_NV12_BPP10_UBWC:
-               alignment = 256;
                stride = MSM_MEDIA_ALIGN(width, 192);
-               stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+               stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
                break;
        case COLOR_FMT_P010_UBWC:
-               alignment = 256;
-               stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+               stride = MSM_MEDIA_ALIGN(width * 2, 256);
                break;
        case COLOR_FMT_P010:
-               alignment = 128;
-               stride = MSM_MEDIA_ALIGN(width*2, alignment);
-               break;
-       default:
+               stride = MSM_MEDIA_ALIGN(width * 2, 128);
                break;
        }
-invalid_input:
+
        return stride;
 }
 
@@ -874,38 +856,32 @@ invalid_input:
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
 {
-       unsigned int alignment, stride = 0;
+       unsigned int stride = 0;
 
        if (!width)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV21:
        case COLOR_FMT_NV12:
        case COLOR_FMT_NV12_MVTB:
        case COLOR_FMT_NV12_UBWC:
-               alignment = 128;
-               stride = MSM_MEDIA_ALIGN(width, alignment);
+               stride = MSM_MEDIA_ALIGN(width, 128);
                break;
        case COLOR_FMT_NV12_BPP10_UBWC:
-               alignment = 256;
                stride = MSM_MEDIA_ALIGN(width, 192);
-               stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+               stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
                break;
        case COLOR_FMT_P010_UBWC:
-               alignment = 256;
-               stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+               stride = MSM_MEDIA_ALIGN(width * 2, 256);
                break;
        case COLOR_FMT_P010:
-               alignment = 128;
-               stride = MSM_MEDIA_ALIGN(width*2, alignment);
-               break;
-       default:
+               stride = MSM_MEDIA_ALIGN(width * 2, 128);
                break;
        }
-invalid_input:
+
        return stride;
 }
 
@@ -916,12 +892,12 @@ invalid_input:
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
 {
-       unsigned int alignment, sclines = 0;
+       unsigned int sclines = 0;
 
        if (!height)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV21:
@@ -929,17 +905,14 @@ static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
        case COLOR_FMT_NV12_MVTB:
        case COLOR_FMT_NV12_UBWC:
        case COLOR_FMT_P010:
-               alignment = 32;
+               sclines = MSM_MEDIA_ALIGN(height, 32);
                break;
        case COLOR_FMT_NV12_BPP10_UBWC:
        case COLOR_FMT_P010_UBWC:
-               alignment = 16;
+               sclines = MSM_MEDIA_ALIGN(height, 16);
                break;
-       default:
-               return 0;
        }
-       sclines = MSM_MEDIA_ALIGN(height, alignment);
-invalid_input:
+
        return sclines;
 }
 
@@ -950,12 +923,12 @@ invalid_input:
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
 {
-       unsigned int alignment, sclines = 0;
+       unsigned int sclines = 0;
 
        if (!height)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV21:
@@ -964,18 +937,13 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
        case COLOR_FMT_NV12_BPP10_UBWC:
        case COLOR_FMT_P010_UBWC:
        case COLOR_FMT_P010:
-               alignment = 16;
+               sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
                break;
        case COLOR_FMT_NV12_UBWC:
-               alignment = 32;
+               sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
                break;
-       default:
-               goto invalid_input;
        }
 
-       sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
-
-invalid_input:
        return sclines;
 }
 
@@ -986,12 +954,12 @@ invalid_input:
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
 {
-       int y_tile_width = 0, y_meta_stride = 0;
+       int y_tile_width = 0, y_meta_stride;
 
        if (!width)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV12_UBWC:
@@ -1002,14 +970,11 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
                y_tile_width = 48;
                break;
        default:
-               goto invalid_input;
+               return 0;
        }
 
        y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
-       y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
-
-invalid_input:
-       return y_meta_stride;
+       return MSM_MEDIA_ALIGN(y_meta_stride, 64);
 }
 
 /*
@@ -1019,12 +984,12 @@ invalid_input:
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
 {
-       int y_tile_height = 0, y_meta_scanlines = 0;
+       int y_tile_height = 0, y_meta_scanlines;
 
        if (!height)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV12_UBWC:
@@ -1035,14 +1000,11 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
                y_tile_height = 4;
                break;
        default:
-               goto invalid_input;
+               return 0;
        }
 
        y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
-       y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
-
-invalid_input:
-       return y_meta_scanlines;
+       return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
 }
 
 /*
@@ -1052,12 +1014,12 @@ invalid_input:
  * Progressive: width
  * Interlaced: width
  */
-static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
 {
-       int uv_tile_width = 0, uv_meta_stride = 0;
+       int uv_tile_width = 0, uv_meta_stride;
 
        if (!width)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV12_UBWC:
@@ -1068,14 +1030,11 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
                uv_tile_width = 24;
                break;
        default:
-               goto invalid_input;
+               return 0;
        }
 
        uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
-       uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
-
-invalid_input:
-       return uv_meta_stride;
+       return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
 }
 
 /*
@@ -1085,12 +1044,12 @@ invalid_input:
  * Progressive: height
  * Interlaced: (height+1)>>1
  */
-static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
 {
-       int uv_tile_height = 0, uv_meta_scanlines = 0;
+       int uv_tile_height = 0, uv_meta_scanlines;
 
        if (!height)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_NV12_UBWC:
@@ -1101,22 +1060,19 @@ static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
                uv_tile_height = 4;
                break;
        default:
-               goto invalid_input;
+               return 0;
        }
 
        uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
-       uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
-
-invalid_input:
-       return uv_meta_scanlines;
+       return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
 }
 
-static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
 {
-       unsigned int alignment = 0, stride = 0, bpp = 4;
+       unsigned int alignment = 0, bpp = 4;
 
        if (!width)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_RGBA8888:
@@ -1131,21 +1087,18 @@ static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
                alignment = 256;
                break;
        default:
-               goto invalid_input;
+               return 0;
        }
 
-       stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
-
-invalid_input:
-       return stride;
+       return MSM_MEDIA_ALIGN(width * bpp, alignment);
 }
 
-static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
 {
-       unsigned int alignment = 0, scanlines = 0;
+       unsigned int alignment = 0;
 
        if (!height)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_RGBA8888:
@@ -1157,220 +1110,46 @@ static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
                alignment = 16;
                break;
        default:
-               goto invalid_input;
+               return 0;
        }
 
-       scanlines = MSM_MEDIA_ALIGN(height, alignment);
-
-invalid_input:
-       return scanlines;
+       return MSM_MEDIA_ALIGN(height, alignment);
 }
 
-static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
 {
-       int rgb_tile_width = 0, rgb_meta_stride = 0;
+       int rgb_meta_stride;
 
        if (!width)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_RGBA8888_UBWC:
        case COLOR_FMT_RGBA1010102_UBWC:
        case COLOR_FMT_RGB565_UBWC:
-               rgb_tile_width = 16;
-               break;
-       default:
-               goto invalid_input;
+               rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+               return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
        }
 
-       rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
-       rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
-
-invalid_input:
-       return rgb_meta_stride;
+       return 0;
 }
 
-static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
 {
-       int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+       int rgb_meta_scanlines;
 
        if (!height)
-               goto invalid_input;
+               return 0;
 
        switch (color_fmt) {
        case COLOR_FMT_RGBA8888_UBWC:
        case COLOR_FMT_RGBA1010102_UBWC:
        case COLOR_FMT_RGB565_UBWC:
-               rgb_tile_height = 4;
-               break;
-       default:
-               goto invalid_input;
+               rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+               return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
        }
 
-       rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
-       rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
-
-invalid_input:
-       return rgb_meta_scanlines;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- * @height
- * Progressive: height
- * Interlaced: height
- */
-static inline unsigned int VENUS_BUFFER_SIZE(
-       int color_fmt, int width, int height)
-{
-       const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
-       unsigned int uv_alignment = 0, size = 0;
-       unsigned int y_plane, uv_plane, y_stride,
-               uv_stride, y_sclines, uv_sclines;
-       unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
-       unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
-       unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
-       unsigned int y_meta_plane = 0, uv_meta_plane = 0;
-       unsigned int rgb_stride = 0, rgb_scanlines = 0;
-       unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
-       unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
-
-       if (!width || !height)
-               goto invalid_input;
-
-       y_stride = VENUS_Y_STRIDE(color_fmt, width);
-       uv_stride = VENUS_UV_STRIDE(color_fmt, width);
-       y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
-       uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
-       rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
-       rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
-
-       switch (color_fmt) {
-       case COLOR_FMT_NV21:
-       case COLOR_FMT_NV12:
-       case COLOR_FMT_P010:
-               uv_alignment = 4096;
-               y_plane = y_stride * y_sclines;
-               uv_plane = uv_stride * uv_sclines + uv_alignment;
-               size = y_plane + uv_plane +
-                               MSM_MEDIA_MAX(extra_size, 8 * y_stride);
-               size = MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       case COLOR_FMT_NV12_MVTB:
-               uv_alignment = 4096;
-               y_plane = y_stride * y_sclines;
-               uv_plane = uv_stride * uv_sclines + uv_alignment;
-               size = y_plane + uv_plane;
-               size = 2 * size + extra_size;
-               size = MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       case COLOR_FMT_NV12_UBWC:
-               y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
-               y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
-               uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
-               uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
-               y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
-               y_meta_scanlines =
-                       VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
-               y_meta_plane = MSM_MEDIA_ALIGN(
-                       y_meta_stride * y_meta_scanlines, 4096);
-               uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-               uv_meta_scanlines =
-                       VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
-               uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
-                       uv_meta_scanlines, 4096);
-
-               size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
-                       uv_meta_plane)*2 +
-                       MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
-               size = MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       case COLOR_FMT_NV12_BPP10_UBWC:
-               y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
-               uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
-               y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
-               y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
-               y_meta_plane = MSM_MEDIA_ALIGN(
-                               y_meta_stride * y_meta_scanlines, 4096);
-               uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-               uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
-               uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
-                                       uv_meta_scanlines, 4096);
-
-               size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
-                       uv_meta_plane +
-                       MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
-               size = MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       case COLOR_FMT_P010_UBWC:
-               y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
-               uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
-               y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
-               y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
-               y_meta_plane = MSM_MEDIA_ALIGN(
-                               y_meta_stride * y_meta_scanlines, 4096);
-               uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
-               uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
-               uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
-                                       uv_meta_scanlines, 4096);
-
-               size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
-                       uv_meta_plane;
-               size = MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       case COLOR_FMT_RGBA8888:
-               rgb_plane = MSM_MEDIA_ALIGN(rgb_stride  * rgb_scanlines, 4096);
-               size = rgb_plane;
-               size =  MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       case COLOR_FMT_RGBA8888_UBWC:
-       case COLOR_FMT_RGBA1010102_UBWC:
-       case COLOR_FMT_RGB565_UBWC:
-               rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
-                                                       4096);
-               rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
-               rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
-                                       height);
-               rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
-                                       rgb_meta_scanlines, 4096);
-               size = rgb_ubwc_plane + rgb_meta_plane;
-               size = MSM_MEDIA_ALIGN(size, 4096);
-               break;
-       default:
-               break;
-       }
-invalid_input:
-       return size;
-}
-
-static inline unsigned int VENUS_VIEW2_OFFSET(
-       int color_fmt, int width, int height)
-{
-       unsigned int offset = 0;
-       unsigned int y_plane, uv_plane, y_stride,
-               uv_stride, y_sclines, uv_sclines;
-       if (!width || !height)
-               goto invalid_input;
-
-       y_stride = VENUS_Y_STRIDE(color_fmt, width);
-       uv_stride = VENUS_UV_STRIDE(color_fmt, width);
-       y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
-       uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
-       switch (color_fmt) {
-       case COLOR_FMT_NV12_MVTB:
-               y_plane = y_stride * y_sclines;
-               uv_plane = uv_stride * uv_sclines;
-               offset = y_plane + uv_plane;
-               break;
-       default:
-               break;
-       }
-invalid_input:
-       return offset;
+       return 0;
 }
 
 #endif
index 457c29dba4a1a096012fad1fd7b21e3b47cdb22e..8f2359dc87b4ea5b34aabe77bb288a365f5f3233 100644 (file)
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
        struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
        struct msm_kms *kms = &mdp4_kms->base.base;
 
-       msm_gem_put_iova(val, kms->aspace);
+       msm_gem_unpin_iova(val, kms->aspace);
        drm_gem_object_put_unlocked(val);
 }
 
@@ -384,7 +384,7 @@ static void update_cursor(struct drm_crtc *crtc)
                if (next_bo) {
                        /* take a obj ref + iova ref when we start scanning out: */
                        drm_gem_object_get(next_bo);
-                       msm_gem_get_iova(next_bo, kms->aspace, &iova);
+                       msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
 
                        /* enable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -429,7 +429,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
        int ret;
 
        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-               dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+               DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
                return -EINVAL;
        }
 
@@ -442,7 +442,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
        }
 
        if (cursor_bo) {
-               ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
+               ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
                if (ret)
                        goto fail;
        } else {
index ba8e587f734b3d6632af0d7796ab9929a17808b5..a8fd14d4846b37d95160b6b00ea84c32a270c2bc 100644 (file)
@@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
        struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
 
        if (!dtv_pdata) {
-               dev_err(dev->dev, "could not find dtv pdata\n");
+               DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
                return;
        }
 
@@ -209,16 +209,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
 
        ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
        if (ret)
-               dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+               DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
                        pc, ret);
 
        ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
        if (ret)
-               dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
 
        ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
        if (ret)
-               dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
 
        mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
 
@@ -258,14 +258,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
 
        mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
        if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
-               dev_err(dev->dev, "failed to get hdmi_clk\n");
+               DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
                ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
                goto fail;
        }
 
        mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
        if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-               dev_err(dev->dev, "failed to get tv_clk\n");
+               DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
                ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
                goto fail;
        }
index 44d1cda56974d7f6c106329ae0fac034fe4d38c6..e437aa806f7beaa57ec87fc2808cd0c7949c560a 100644 (file)
@@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
        DBG("found MDP4 version v%d.%d", major, minor);
 
        if (major != 4) {
-               dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+               DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
                                major, minor);
                ret = -ENXIO;
                goto out;
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
        struct msm_gem_address_space *aspace = kms->aspace;
 
        if (mdp4_kms->blank_cursor_iova)
-               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+               msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
        drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
 
        if (aspace) {
@@ -206,7 +206,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
        clk_disable_unprepare(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_disable_unprepare(mdp4_kms->pclk);
-       clk_disable_unprepare(mdp4_kms->lut_clk);
+       if (mdp4_kms->lut_clk)
+               clk_disable_unprepare(mdp4_kms->lut_clk);
        if (mdp4_kms->axi_clk)
                clk_disable_unprepare(mdp4_kms->axi_clk);
 
@@ -220,7 +221,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
        clk_prepare_enable(mdp4_kms->clk);
        if (mdp4_kms->pclk)
                clk_prepare_enable(mdp4_kms->pclk);
-       clk_prepare_enable(mdp4_kms->lut_clk);
+       if (mdp4_kms->lut_clk)
+               clk_prepare_enable(mdp4_kms->lut_clk);
        if (mdp4_kms->axi_clk)
                clk_prepare_enable(mdp4_kms->axi_clk);
 
@@ -251,7 +253,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 
                encoder = mdp4_lcdc_encoder_init(dev, panel_node);
                if (IS_ERR(encoder)) {
-                       dev_err(dev->dev, "failed to construct LCDC encoder\n");
+                       DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
                        return PTR_ERR(encoder);
                }
 
@@ -260,7 +262,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 
                connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
                if (IS_ERR(connector)) {
-                       dev_err(dev->dev, "failed to initialize LVDS connector\n");
+                       DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
                        return PTR_ERR(connector);
                }
 
@@ -271,7 +273,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
        case DRM_MODE_ENCODER_TMDS:
                encoder = mdp4_dtv_encoder_init(dev);
                if (IS_ERR(encoder)) {
-                       dev_err(dev->dev, "failed to construct DTV encoder\n");
+                       DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
                        return PTR_ERR(encoder);
                }
 
@@ -282,7 +284,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                        /* Construct bridge/connector for HDMI: */
                        ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
                        if (ret) {
-                               dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+                               DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
                                return ret;
                        }
                }
@@ -300,7 +302,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
                encoder = mdp4_dsi_encoder_init(dev);
                if (IS_ERR(encoder)) {
                        ret = PTR_ERR(encoder);
-                       dev_err(dev->dev,
+                       DRM_DEV_ERROR(dev->dev,
                                "failed to construct DSI encoder: %d\n", ret);
                        return ret;
                }
@@ -311,14 +313,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
 
                ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
                if (ret) {
-                       dev_err(dev->dev, "failed to initialize DSI: %d\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
                                ret);
                        return ret;
                }
 
                break;
        default:
-               dev_err(dev->dev, "Invalid or unsupported interface\n");
+               DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
                return -EINVAL;
        }
 
@@ -354,7 +356,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
        for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
                plane = mdp4_plane_init(dev, vg_planes[i], false);
                if (IS_ERR(plane)) {
-                       dev_err(dev->dev,
+                       DRM_DEV_ERROR(dev->dev,
                                "failed to construct plane for VG%d\n", i + 1);
                        ret = PTR_ERR(plane);
                        goto fail;
@@ -365,7 +367,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
        for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
                plane = mdp4_plane_init(dev, rgb_planes[i], true);
                if (IS_ERR(plane)) {
-                       dev_err(dev->dev,
+                       DRM_DEV_ERROR(dev->dev,
                                "failed to construct plane for RGB%d\n", i + 1);
                        ret = PTR_ERR(plane);
                        goto fail;
@@ -374,7 +376,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
                crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
                                mdp4_crtcs[i]);
                if (IS_ERR(crtc)) {
-                       dev_err(dev->dev, "failed to construct crtc for %s\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
                                mdp4_crtc_names[i]);
                        ret = PTR_ERR(crtc);
                        goto fail;
@@ -396,7 +398,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
        for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
                ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
                if (ret) {
-                       dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
                                i, ret);
                        goto fail;
                }
@@ -419,7 +421,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 
        mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
        if (!mdp4_kms) {
-               dev_err(dev->dev, "failed to allocate kms\n");
+               DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
                ret = -ENOMEM;
                goto fail;
        }
@@ -439,7 +441,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
-               dev_err(dev->dev, "failed to get irq: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }
 
@@ -456,14 +458,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        if (mdp4_kms->vdd) {
                ret = regulator_enable(mdp4_kms->vdd);
                if (ret) {
-                       dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
                        goto fail;
                }
        }
 
        mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
        if (IS_ERR(mdp4_kms->clk)) {
-               dev_err(dev->dev, "failed to get core_clk\n");
+               DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
                ret = PTR_ERR(mdp4_kms->clk);
                goto fail;
        }
@@ -472,23 +474,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        if (IS_ERR(mdp4_kms->pclk))
                mdp4_kms->pclk = NULL;
 
-       // XXX if (rev >= MDP_REV_42) { ???
-       mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
-       if (IS_ERR(mdp4_kms->lut_clk)) {
-               dev_err(dev->dev, "failed to get lut_clk\n");
-               ret = PTR_ERR(mdp4_kms->lut_clk);
-               goto fail;
+       if (mdp4_kms->rev >= 2) {
+               mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+               if (IS_ERR(mdp4_kms->lut_clk)) {
+                       DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+                       ret = PTR_ERR(mdp4_kms->lut_clk);
+                       goto fail;
+               }
        }
 
        mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(mdp4_kms->axi_clk)) {
-               dev_err(dev->dev, "failed to get axi_clk\n");
+               DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
                ret = PTR_ERR(mdp4_kms->axi_clk);
                goto fail;
        }
 
        clk_set_rate(mdp4_kms->clk, config->max_clk);
-       clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+       if (mdp4_kms->lut_clk)
+               clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
 
        pm_runtime_enable(dev->dev);
        mdp4_kms->rpm_enabled = true;
@@ -519,29 +523,29 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                if (ret)
                        goto fail;
        } else {
-               dev_info(dev->dev, "no iommu, fallback to phys "
+               DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
                aspace = NULL;
        }
 
        ret = modeset_init(mdp4_kms);
        if (ret) {
-               dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
                goto fail;
        }
 
-       mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+       mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
        if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
                ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
-               dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
                mdp4_kms->blank_cursor_bo = NULL;
                goto fail;
        }
 
-       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+       ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
                        &mdp4_kms->blank_cursor_iova);
        if (ret) {
-               dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
                goto fail;
        }
 
index 2bfb39082f54dd7f9e9c51e038a46e2955ed03c8..c9e34501a89e8c485743b8a27632783bde4355bb 100644 (file)
@@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
        struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
 
        if (!lcdc_pdata) {
-               dev_err(dev->dev, "could not find lvds pdata\n");
+               DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
                return;
        }
 
@@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
                break;
 
        default:
-               dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+               DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
                return;
        }
 
@@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
                                MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
                break;
        default:
-               dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+               DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
                return;
        }
 
@@ -361,7 +361,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
        for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
                ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
                if (ret)
-                       dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
        }
 
        bs_set(mdp4_lcdc_encoder, 0);
@@ -377,20 +377,25 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
        unsigned long pc = mdp4_lcdc_encoder->pixclock;
        struct mdp4_kms *mdp4_kms = get_kms(encoder);
        struct drm_panel *panel;
+       uint32_t config;
        int i, ret;
 
        if (WARN_ON(mdp4_lcdc_encoder->enabled))
                return;
 
        /* TODO: hard-coded for 18bpp: */
-       mdp4_crtc_set_config(encoder->crtc,
-                       MDP4_DMA_CONFIG_R_BPC(BPC6) |
-                       MDP4_DMA_CONFIG_G_BPC(BPC6) |
-                       MDP4_DMA_CONFIG_B_BPC(BPC6) |
-                       MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
-                       MDP4_DMA_CONFIG_PACK(0x21) |
-                       MDP4_DMA_CONFIG_DEFLKR_EN |
-                       MDP4_DMA_CONFIG_DITHER_EN);
+       config =
+               MDP4_DMA_CONFIG_R_BPC(BPC6) |
+               MDP4_DMA_CONFIG_G_BPC(BPC6) |
+               MDP4_DMA_CONFIG_B_BPC(BPC6) |
+               MDP4_DMA_CONFIG_PACK(0x21) |
+               MDP4_DMA_CONFIG_DEFLKR_EN |
+               MDP4_DMA_CONFIG_DITHER_EN;
+
+       if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+               config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+       mdp4_crtc_set_config(encoder->crtc, config);
        mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
 
        bs_set(mdp4_lcdc_encoder, 1);
@@ -398,16 +403,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
        for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
                ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
                if (ret)
-                       dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
        }
 
        DBG("setting lcdc_clk=%lu", pc);
        ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
        if (ret)
-               dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
        ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
        if (ret)
-               dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
 
        panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
        if (!IS_ERR(panel)) {
@@ -461,7 +466,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
        /* TODO: do we need different pll in other cases? */
        mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
        if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
-               dev_err(dev->dev, "failed to get lvds_clk\n");
+               DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
                ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
                goto fail;
        }
@@ -470,7 +475,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
        reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
        if (IS_ERR(reg)) {
                ret = PTR_ERR(reg);
-               dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
                goto fail;
        }
        mdp4_lcdc_encoder->regs[0] = reg;
@@ -478,7 +483,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
        reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
        if (IS_ERR(reg)) {
                ret = PTR_ERR(reg);
-               dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
                goto fail;
        }
        mdp4_lcdc_encoder->regs[1] = reg;
@@ -486,7 +491,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
        reg = devm_regulator_get(dev->dev, "lvds-vdda");
        if (IS_ERR(reg)) {
                ret = PTR_ERR(reg);
-               dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
                goto fail;
        }
        mdp4_lcdc_encoder->regs[2] = reg;
index 7a499731ce9323ddbb1192413d09ef4902a80d86..005066f7154d3cacc7aec43c884dd8991b35dc92 100644 (file)
@@ -234,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
        format = to_mdp_format(msm_framebuffer_format(fb));
 
        if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
-               dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+               DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
                return -ERANGE;
        }
 
        if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
-               dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+               DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
                return -ERANGE;
        }
 
        if (crtc_w > (src_w * UP_SCALE_MAX)) {
-               dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+               DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
                return -ERANGE;
        }
 
        if (crtc_h > (src_h * UP_SCALE_MAX)) {
-               dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+               DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
                return -ERANGE;
        }
 
index 824067d2d4277d36699b1f15c6d58f74a97fe23f..ea8f7d7daf7f4dd76e20d5b075b328f3b02a152e 100644 (file)
@@ -553,6 +553,91 @@ const struct mdp5_cfg_hw msm8x96_config = {
        .max_clk = 412500000,
 };
 
+const struct mdp5_cfg_hw msm8917_config = {
+       .name = "msm8917",
+       .mdp = {
+               .count = 1,
+               .caps = MDP_CAP_CDM,
+       },
+       .ctl = {
+               .count = 3,
+               .base = { 0x01000, 0x01200, 0x01400 },
+               .flush_hw_mask = 0xffffffff,
+       },
+       .pipe_vig = {
+               .count = 1,
+               .base = { 0x04000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SCALE      |
+                       MDP_PIPE_CAP_CSC        |
+                       MDP_PIPE_CAP_DECIMATION |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_rgb = {
+               .count = 2,
+               .base = { 0x14000, 0x16000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_DECIMATION |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_dma = {
+               .count = 1,
+               .base = { 0x24000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       0,
+       },
+       .pipe_cursor = {
+               .count = 1,
+               .base = { 0x34000 },
+               .caps = MDP_PIPE_CAP_HFLIP      |
+                       MDP_PIPE_CAP_VFLIP      |
+                       MDP_PIPE_CAP_SW_PIX_EXT |
+                       MDP_PIPE_CAP_CURSOR     |
+                       0,
+       },
+
+       .lm = {
+               .count = 2,
+               .base = { 0x44000, 0x45000 },
+               .instances = {
+                               { .id = 0, .pp = 0, .dspp = 0,
+                                 .caps = MDP_LM_CAP_DISPLAY, },
+                               { .id = 1, .pp = -1, .dspp = -1,
+                                 .caps = MDP_LM_CAP_WB },
+                            },
+               .nb_stages = 8,
+               .max_width = 2048,
+               .max_height = 0xFFFF,
+       },
+       .dspp = {
+               .count = 1,
+               .base = { 0x54000 },
+
+       },
+       .pp = {
+               .count = 1,
+               .base = { 0x70000 },
+       },
+       .cdm = {
+               .count = 1,
+               .base = { 0x79200 },
+       },
+       .intf = {
+               .base = { 0x6a000, 0x6a800 },
+               .connect = {
+                       [0] = INTF_DISABLED,
+                       [1] = INTF_DSI,
+               },
+       },
+       .max_clk = 320000000,
+};
+
 static const struct mdp5_cfg_handler cfg_handlers[] = {
        { .revision = 0, .config = { .hw = &msm8x74v1_config } },
        { .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -560,6 +645,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
        { .revision = 6, .config = { .hw = &msm8x16_config } },
        { .revision = 9, .config = { .hw = &msm8x94_config } },
        { .revision = 7, .config = { .hw = &msm8x96_config } },
+       { .revision = 15, .config = { .hw = &msm8917_config } },
 };
 
 static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@@ -600,7 +686,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
        }
 
        if (major != 1) {
-               dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+               DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
                                major, minor);
                ret = -ENXIO;
                goto fail;
@@ -615,7 +701,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
                break;
        }
        if (unlikely(!mdp5_cfg)) {
-               dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+               DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
                                major, minor);
                ret = -ENXIO;
                goto fail;
index d6f79dc755b46d9b53e491422c53c604dbe73f4f..c1962f29ec7d688e98ec57f40c9375210fc47af0 100644 (file)
@@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
        int pp_id = mixer->pp;
 
        if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
-               dev_err(dev, "vsync_clk is not initialized\n");
+               DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
                return -EINVAL;
        }
 
        total_lines_x100 = mode->vtotal * mode->vrefresh;
        if (!total_lines_x100) {
-               dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+               DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
                                __func__, mode->vtotal, mode->vrefresh);
                return -EINVAL;
        }
 
        vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
        if (vsync_clk_speed <= 0) {
-               dev_err(dev, "vsync_clk round rate failed %ld\n",
+               DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
                                                        vsync_clk_speed);
                return -EINVAL;
        }
@@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
        ret = clk_set_rate(mdp5_kms->vsync_clk,
                clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
        if (ret) {
-               dev_err(encoder->dev->dev,
+               DRM_DEV_ERROR(encoder->dev->dev,
                        "vsync_clk clk_set_rate failed, %d\n", ret);
                return ret;
        }
        ret = clk_prepare_enable(mdp5_kms->vsync_clk);
        if (ret) {
-               dev_err(encoder->dev->dev,
+               DRM_DEV_ERROR(encoder->dev->dev,
                        "vsync_clk clk_prepare_enable failed, %d\n", ret);
                return ret;
        }
index b1da9ce54379099f3fc33e4f48a49cd007b74fa1..c5fde1a4191aaa03d7a002e52b803a2689519667 100644 (file)
@@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
        struct msm_kms *kms = &mdp5_kms->base.base;
 
-       msm_gem_put_iova(val, kms->aspace);
+       msm_gem_unpin_iova(val, kms->aspace);
        drm_gem_object_put_unlocked(val);
 }
 
@@ -662,7 +662,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 
        ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
        if (ret) {
-               dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
                return ret;
        }
 
@@ -679,7 +679,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
         * and that we don't have conflicting mixer stages:
         */
        if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
-               dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+               DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
                        cnt, start);
                return -EINVAL;
        }
@@ -879,7 +879,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        }
 
        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
-               dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+               DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
                return -EINVAL;
        }
 
@@ -903,7 +903,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        if (!cursor_bo)
                return -ENOENT;
 
-       ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+       ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
                        &mdp5_crtc->cursor.iova);
        if (ret)
                return -EINVAL;
@@ -924,7 +924,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 set_cursor:
        ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
        if (ret) {
-               dev_err(dev->dev, "failed to %sable cursor: %d\n",
+               DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
                                cursor_enable ? "en" : "dis", ret);
                goto end;
        }
index f93d5681267c7c56102bd82d47978a9eb9826792..65a871f9f0d9c1cb6f130e811de1605e57815b17 100644 (file)
@@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
        struct mdp5_hw_mixer *mixer = pipeline->mixer;
 
        if (unlikely(WARN_ON(!mixer))) {
-               dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+               DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
                        ctl->id);
                return -EINVAL;
        }
 
        if (pipeline->r_mixer) {
-               dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+               DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
                return -EINVAL;
        }
 
@@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
                mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
                return 0;
        } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
-               dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+               DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
                return -EINVAL;
        } else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
-               dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+               DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
                return -EINVAL;
        }
 
@@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
                if ((ctl_mgr->ctls[c].status & checkm) == match)
                        goto found;
 
-       dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+       DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
        goto unlock;
 
 found:
@@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 
        ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
        if (!ctl_mgr) {
-               dev_err(dev->dev, "failed to allocate CTL manager\n");
+               DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
                ret = -ENOMEM;
                goto fail;
        }
 
        if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
-               dev_err(dev->dev, "Increase static pool size to at least %d\n",
+               DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
                                ctl_cfg->count);
                ret = -ENOSPC;
                goto fail;
@@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
 
                if (WARN_ON(!ctl_cfg->base[c])) {
-                       dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+                       DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
                        ret = -EINVAL;
                        spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
                        goto fail;
index bddd625ab91bd44a56824c3e30a8ea2edefe46ff..d27e35a217bd77f3ffbfe88f452bf1f7c8c7f51e 100644 (file)
@@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
                        minor->debugfs_root, minor);
 
        if (ret) {
-               dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+               DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
                return ret;
        }
 
@@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 
        encoder = mdp5_encoder_init(dev, intf, ctl);
        if (IS_ERR(encoder)) {
-               dev_err(dev->dev, "failed to construct encoder\n");
+               DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
                return encoder;
        }
 
@@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
                int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
 
                if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
-                       dev_err(dev->dev, "failed to find dsi from intf %d\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
                                intf->num);
                        ret = -EINVAL;
                        break;
@@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
                break;
        }
        default:
-               dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+               DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
                ret = -EINVAL;
                break;
        }
@@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
                plane = mdp5_plane_init(dev, type);
                if (IS_ERR(plane)) {
                        ret = PTR_ERR(plane);
-                       dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
                        goto fail;
                }
                priv->planes[priv->num_planes++] = plane;
@@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
                crtc  = mdp5_crtc_init(dev, primary[i], cursor[i], i);
                if (IS_ERR(crtc)) {
                        ret = PTR_ERR(crtc);
-                       dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
                        goto fail;
                }
                priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
        *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
        *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
 
-       dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+       DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
 }
 
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
        struct device *dev = &pdev->dev;
        struct clk *clk = msm_clk_get(pdev, name);
        if (IS_ERR(clk) && mandatory) {
-               dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+               DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
                return PTR_ERR(clk);
        }
        if (IS_ERR(clk))
@@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (irq < 0) {
                ret = irq;
-               dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }
 
@@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret) {
-                       dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+                       DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
                                ret);
                        goto fail;
                }
        } else {
-               dev_info(&pdev->dev,
+               DRM_DEV_INFO(&pdev->dev,
                         "no iommu, fallback to phys contig buffers for scanout\n");
                aspace = NULL;
        }
@@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 
        ret = modeset_init(mdp5_kms);
        if (ret) {
-               dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
                goto fail;
        }
 
@@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
                hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
                if (IS_ERR(hwpipe)) {
                        ret = PTR_ERR(hwpipe);
-                       dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
                                        pipe2name(pipes[i]), ret);
                        return ret;
                }
@@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
                mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
                if (IS_ERR(mixer)) {
                        ret = PTR_ERR(mixer);
-                       dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
                                i, ret);
                        return ret;
                }
@@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
 
                intf = kzalloc(sizeof(*intf), GFP_KERNEL);
                if (!intf) {
-                       dev_err(dev->dev, "failed to construct INTF%d\n", i);
+                       DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
                        return -ENOMEM;
                }
 
index 1cc4e57f0226f89c40c5b78baa3045138c89470d..889c2940692c8e4221c6fd1d54c27b1a6ffd8a34 100644 (file)
@@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
        d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
                                  mdp5_mdss);
        if (!d) {
-               dev_err(dev, "mdss irq domain add failed\n");
+               DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
                return -ENXIO;
        }
 
@@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)
 
        ret = msm_mdss_get_clocks(mdp5_mdss);
        if (ret) {
-               dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
                goto fail;
        }
 
@@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)
 
        ret = regulator_enable(mdp5_mdss->vdd);
        if (ret) {
-               dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+               DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
                        ret);
                goto fail;
        }
@@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
        ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
                               mdss_irq, 0, "mdss_isr", mdp5_mdss);
        if (ret) {
-               dev_err(dev->dev, "failed to init irq: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
                goto fail_irq;
        }
 
        ret = mdss_irq_domain_init(mdp5_mdss);
        if (ret) {
-               dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
                goto fail_irq;
        }
 
index 310459541e48d97fea4d0c2b9c3a2322be5401f5..be13140967b4e8a294263ed2632673aee73368b0 100644 (file)
@@ -125,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
 
        SET_PROPERTY(zpos, ZPOS, uint8_t);
 
-       dev_err(dev->dev, "Invalid property\n");
+       DRM_DEV_ERROR(dev->dev, "Invalid property\n");
        ret = -EINVAL;
 done:
        return ret;
@@ -153,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
 
        GET_PROPERTY(zpos, ZPOS, uint8_t);
 
-       dev_err(dev->dev, "Invalid property\n");
+       DRM_DEV_ERROR(dev->dev, "Invalid property\n");
        ret = -EINVAL;
 done:
        return ret;
@@ -658,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,
 
        ret = calc_phase_step(src, dest, &phasex_step);
        if (ret) {
-               dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+               DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
                return ret;
        }
 
@@ -683,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,
 
        ret = calc_phase_step(src, dest, &phasey_step);
        if (ret) {
-               dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+               DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
                return ret;
        }
 
index 96c2b828dba4a7cf2934c95a16fa092a6d1c0152..7cebcb2b3a379246e55faef1dcce6657d2ec3a3a 100644 (file)
@@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,
 
        avail = cnt - bitmap_weight(state->state, cnt);
        if (nblks > avail) {
-               dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+               DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
                                nblks, avail);
                return -ENOSPC;
        }
@@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
                DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
                ret = smp_request_block(smp, state, cid, n);
                if (ret) {
-                       dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+                       DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
                                        n, ret);
                        return ret;
                }
index a9768f823290bd1037c90a49ccb97273cb46a9a5..7b2a1e6a881079ec39c6d838da7d743a30758642 100644 (file)
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
 
        phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
        if (!phy_node) {
-               dev_err(&pdev->dev, "cannot find phy device\n");
+               DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
                return -ENXIO;
        }
 
@@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
        of_node_put(phy_node);
 
        if (!phy_pdev || !msm_dsi->phy) {
-               dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+               DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
                return -EPROBE_DEFER;
        }
 
@@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 
        ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
        if (ret) {
-               dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
                goto fail;
        }
 
@@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
        msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
        if (IS_ERR(msm_dsi->bridge)) {
                ret = PTR_ERR(msm_dsi->bridge);
-               dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
                msm_dsi->bridge = NULL;
                goto fail;
        }
@@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
 
        if (IS_ERR(msm_dsi->connector)) {
                ret = PTR_ERR(msm_dsi->connector);
-               dev_err(dev->dev,
+               DRM_DEV_ERROR(dev->dev,
                        "failed to create dsi connector: %d\n", ret);
                msm_dsi->connector = NULL;
                goto fail;
index 9c6c523eacdcb7abe62c3501b5ff6f143be9db12..38e481d2d606f9f8d3a0600c8e5f5495db00a848 100644 (file)
@@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
                        msecs_to_jiffies(70));
 
        if (ret <= 0)
-               dev_err(dev, "wait for video done timed out\n");
+               DRM_DEV_ERROR(dev, "wait for video done timed out\n");
 
        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 }
@@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
                return PTR_ERR(data);
        }
 
+       msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
        msm_host->tx_size = msm_host->tx_gem_obj->size;
 
        return 0;
@@ -1118,7 +1120,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
 
        priv = dev->dev_private;
        if (msm_host->tx_gem_obj) {
-               msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+               msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
                drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
                msm_host->tx_gem_obj = NULL;
        }
@@ -1248,7 +1250,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
        if (!dma_base)
                return -EINVAL;
 
-       return msm_gem_get_iova(msm_host->tx_gem_obj,
+       return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
                                priv->kms->aspace, dma_base);
 }
 
@@ -1673,7 +1675,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 
        prop = of_find_property(ep, "data-lanes", &len);
        if (!prop) {
-               dev_dbg(dev,
+               DRM_DEV_DEBUG(dev,
                        "failed to find data lane mapping, using default\n");
                return 0;
        }
@@ -1681,7 +1683,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
        num_lanes = len / sizeof(u32);
 
        if (num_lanes < 1 || num_lanes > 4) {
-               dev_err(dev, "bad number of data lanes\n");
+               DRM_DEV_ERROR(dev, "bad number of data lanes\n");
                return -EINVAL;
        }
 
@@ -1690,7 +1692,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
        ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
                                         num_lanes);
        if (ret) {
-               dev_err(dev, "failed to read lane data\n");
+               DRM_DEV_ERROR(dev, "failed to read lane data\n");
                return ret;
        }
 
@@ -1711,7 +1713,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
                 */
                for (j = 0; j < num_lanes; j++) {
                        if (lane_map[j] < 0 || lane_map[j] > 3)
-                               dev_err(dev, "bad physical lane entry %u\n",
+                               DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
                                        lane_map[j]);
 
                        if (swap[lane_map[j]] != j)
@@ -1742,13 +1744,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
         */
        endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
        if (!endpoint) {
-               dev_dbg(dev, "%s: no endpoint\n", __func__);
+               DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
                return 0;
        }
 
        ret = dsi_host_parse_lane_data(msm_host, endpoint);
        if (ret) {
-               dev_err(dev, "%s: invalid lane configuration %d\n",
+               DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
                        __func__, ret);
                ret = -EINVAL;
                goto err;
@@ -1757,7 +1759,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
        /* Get panel node from the output port's endpoint data */
        device_node = of_graph_get_remote_node(np, 1, 0);
        if (!device_node) {
-               dev_dbg(dev, "%s: no valid device\n", __func__);
+               DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
                ret = -ENODEV;
                goto err;
        }
@@ -1768,7 +1770,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
                msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
                                        "syscon-sfpb");
                if (IS_ERR(msm_host->sfpb)) {
-                       dev_err(dev, "%s: failed to get sfpb regmap\n",
+                       DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
                                __func__);
                        ret = PTR_ERR(msm_host->sfpb);
                }
@@ -1918,7 +1920,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
        msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (msm_host->irq < 0) {
                ret = msm_host->irq;
-               dev_err(dev->dev, "failed to get irq: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
                return ret;
        }
 
@@ -1926,7 +1928,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
                        dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                        "dsi_isr", msm_host);
        if (ret < 0) {
-               dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+               DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
                                msm_host->irq, ret);
                return ret;
        }
index 9a9fa0c75a131083f32c57f5cd89a0e02c7b87d1..1760483b247e60a17f6b20e6933b83ecab392237 100644 (file)
@@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
 
        ret = devm_regulator_bulk_get(dev, num, s);
        if (ret < 0) {
-               dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+               DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
                                                __func__, ret);
                return ret;
        }
@@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
                        ret = regulator_set_load(s[i].consumer,
                                                        regs[i].enable_load);
                        if (ret < 0) {
-                               dev_err(dev,
+                               DRM_DEV_ERROR(dev,
                                        "regulator %d set op mode failed, %d\n",
                                        i, ret);
                                goto fail;
@@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
 
        ret = regulator_bulk_enable(num, s);
        if (ret < 0) {
-               dev_err(dev, "regulator enable failed, %d\n", ret);
+               DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
                goto fail;
        }
 
@@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
 
        ret = clk_prepare_enable(phy->ahb_clk);
        if (ret) {
-               dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+               DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
                pm_runtime_put_sync(dev);
        }
 
@@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
        phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
                                "DSI_PHY_REG");
        if (IS_ERR(phy->reg_base)) {
-               dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
                        __func__);
                ret = -ENOMEM;
                goto fail;
@@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
        phy->id = dsi_phy_get_id(phy);
        if (phy->id < 0) {
                ret = phy->id;
-               dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+               DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
                        __func__, ret);
                goto fail;
        }
@@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 
        phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
        if (IS_ERR(phy->base)) {
-               dev_err(dev, "%s: failed to map phy base\n", __func__);
+               DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
                ret = -ENOMEM;
                goto fail;
        }
 
        ret = dsi_phy_regulator_init(phy);
        if (ret) {
-               dev_err(dev, "%s: failed to init regulator\n", __func__);
+               DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
                goto fail;
        }
 
        phy->ahb_clk = msm_clk_get(pdev, "iface");
        if (IS_ERR(phy->ahb_clk)) {
-               dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+               DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
                ret = PTR_ERR(phy->ahb_clk);
                goto fail;
        }
@@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 
        phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
        if (IS_ERR_OR_NULL(phy->pll))
-               dev_info(dev,
+               DRM_DEV_INFO(dev,
                        "%s: pll init failed: %ld, need separate pll clk driver\n",
                        __func__, PTR_ERR(phy->pll));
 
@@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
        ret = dsi_phy_enable_resource(phy);
        if (ret) {
-               dev_err(dev, "%s: resource enable failed, %d\n",
+               DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
                        __func__, ret);
                goto res_en_fail;
        }
 
        ret = dsi_phy_regulator_enable(phy);
        if (ret) {
-               dev_err(dev, "%s: regulator enable failed, %d\n",
+               DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
                        __func__, ret);
                goto reg_en_fail;
        }
 
        ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
        if (ret) {
-               dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+               DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
                goto phy_en_fail;
        }
 
@@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        if (phy->usecase != MSM_DSI_PHY_SLAVE) {
                ret = msm_dsi_pll_restore_state(phy->pll);
                if (ret) {
-                       dev_err(dev, "%s: failed to restore pll state, %d\n",
+                       DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
                                __func__, ret);
                        goto pll_restor_fail;
                }
index b3fffc8dbb2ab572aa688aabe8ef4546728e3fb8..44959e79ce28250c4da3c9f24cd8d83f778cb2d7 100644 (file)
@@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        DBG("");
 
        if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
-               dev_err(&phy->pdev->dev,
+               DRM_DEV_ERROR(&phy->pdev->dev,
                        "%s: D-PHY timing calculation failed\n", __func__);
                return -EINVAL;
        }
@@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
        ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
        if (ret) {
-               dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+               DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
                        __func__, ret);
                return ret;
        }
@@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
        phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
                                     "DSI_PHY_LANE");
        if (IS_ERR(phy->lane_base)) {
-               dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
                        __func__);
                return -ENOMEM;
        }
index 513f4234adc198c9507289fc058ce08f4e084bbf..a172c667e8bcffad675842077906ca0c28a42714 100644 (file)
@@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        void __iomem *lane_base = phy->lane_base;
 
        if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
-               dev_err(&phy->pdev->dev,
+               DRM_DEV_ERROR(&phy->pdev->dev,
                        "%s: D-PHY timing calculation failed\n", __func__);
                return -EINVAL;
        }
@@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
        ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
        if (ret) {
-               dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+               DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
                        __func__, ret);
                return ret;
        }
@@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
        phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
                                "DSI_PHY_LANE");
        if (IS_ERR(phy->lane_base)) {
-               dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
                        __func__);
                return -ENOMEM;
        }
index 1ca6c69516f57c5740055c6b66d2b4b26c18ff63..9ea9478d370785f4f8e0f897135a5268deabf8fa 100644 (file)
@@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        DBG("");
 
        if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-               dev_err(&phy->pdev->dev,
+               DRM_DEV_ERROR(&phy->pdev->dev,
                        "%s: D-PHY timing calculation failed\n", __func__);
                return -EINVAL;
        }
index 4972b52cbe447c2cb0083485aa51f6afe95ddec2..c79505d97fe83d83c0b1f2d54c717e609e331fb9 100644 (file)
@@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        DBG("");
 
        if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-               dev_err(&phy->pdev->dev,
+               DRM_DEV_ERROR(&phy->pdev->dev,
                        "%s: D-PHY timing calculation failed\n", __func__);
                return -EINVAL;
        }
index 39800446349848dce6bbfc9d2808d2d615f4152c..98790b44da48b2de94e1fa765430ba2c482ec70b 100644 (file)
@@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        DBG("");
 
        if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
-               dev_err(&phy->pdev->dev,
+               DRM_DEV_ERROR(&phy->pdev->dev,
                        "%s: D-PHY timing calculation failed\n", __func__);
                return -EINVAL;
        }
index 613e206fa4fc2ce8295fa8df3aa029e8811e9423..7a1fb4da2ad346be4c4ac48fdd7848b332593ac3 100644 (file)
@@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
        }
 
        if (IS_ERR(pll)) {
-               dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+               DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
                return pll;
        }
 
index 4c03f0b7343ed655c60111be4d09249bde463b28..e41f278e4e62864f418d9ba3e9bb23035ce4308d 100644 (file)
@@ -17,7 +17,7 @@
  *                              |                |
  *                              |                |
  *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
  *                 +---------+  |  +----------+  |  +----+
  *                              |                |
  *                              |                |         dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
  *                              |                |  +----+  |  |\  dsi0_pclk_mux
  *                              |                |--| /2 |--o--| \   |
  *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0pll
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
  *                              |------------------------------|  /     +---------+
  *                              |          +-----+             | /
  *                              -----------| /4? |--o----------|/
@@ -688,7 +688,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
 
        hws[num++] = hw;
 
-       snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
        snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
 
        /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -737,7 +737,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
 
        hws[num++] = hw;
 
-       snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
        snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
 
        /* PIX CLK DIV : DIV_CTRL_7_4*/
@@ -760,7 +760,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
        ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
                                     pll_10nm->hw_data);
        if (ret) {
-               dev_err(dev, "failed to register clk provider: %d\n", ret);
+               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
                return ret;
        }
 
@@ -788,13 +788,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
 
        pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
        if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
-               dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
                return ERR_PTR(-ENOMEM);
        }
 
        pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
        if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
-               dev_err(&pdev->dev, "failed to map PLL base\n");
+               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
                return ERR_PTR(-ENOMEM);
        }
 
@@ -813,7 +813,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
 
        ret = pll_10nm_register(pll_10nm);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ERR_PTR(ret);
        }
 
index 71fe60e5f01f1e05e99b45d35db3e47e3dba0bf6..0e18cddd6f22e0eb4bba70ff4ac52af312c9b9d4 100644 (file)
@@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
                                         POLL_TIMEOUT_US);
 
        if (unlikely(!locked))
-               dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+               DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL lock success");
 
@@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
        ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
                                        cached_state->vco_rate, 0);
        if (ret) {
-               dev_err(&pll_14nm->pdev->dev,
+               DRM_DEV_ERROR(&pll_14nm->pdev->dev,
                        "restore vco rate failed. ret=%d\n", ret);
                return ret;
        }
@@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
        ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
                                     pll_14nm->hw_data);
        if (ret) {
-               dev_err(dev, "failed to register clk provider: %d\n", ret);
+               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
                return ret;
        }
 
@@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
 
        pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
        if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
-               dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
                return ERR_PTR(-ENOMEM);
        }
 
        pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
        if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
-               dev_err(&pdev->dev, "failed to map PLL base\n");
+               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
 
        ret = pll_14nm_register(pll_14nm);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ERR_PTR(ret);
        }
 
index 26e3a01a99c2b71dde9fed5bd548ee17b61e2df4..dcbbaeb1b1fbb72a5ff66e04c2e4e984ece55f64 100644 (file)
@@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                if (rate <= lpfr_lut[i].vco_rate)
                        break;
        if (i == LPFR_LUT_SIZE) {
-               dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+               DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
                                rate);
                return -EINVAL;
        }
@@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
        }
 
        if (unlikely(!locked))
-               dev_err(dev, "DSI PLL lock failed\n");
+               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL Lock success");
 
@@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
        locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
 
        if (unlikely(!locked))
-               dev_err(dev, "DSI PLL lock failed\n");
+               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL lock success");
 
@@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
        ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
                                        cached_state->vco_rate, 0);
        if (ret) {
-               dev_err(&pll_28nm->pdev->dev,
+               DRM_DEV_ERROR(&pll_28nm->pdev->dev,
                        "restore vco rate failed. ret=%d\n", ret);
                return ret;
        }
@@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
        ret = of_clk_add_provider(dev->of_node,
                        of_clk_src_onecell_get, &pll_28nm->clk_data);
        if (ret) {
-               dev_err(dev, "failed to register clk provider: %d\n", ret);
+               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
                return ret;
        }
 
@@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
 
        pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
        if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-               dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
                pll->en_seq_cnt = 1;
                pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
        } else {
-               dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+               DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
                return ERR_PTR(-EINVAL);
        }
 
        ret = pll_28nm_register(pll_28nm);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ERR_PTR(ret);
        }
 
index 49008451085b86ccb84ee3760c406554db51fd49..d6897464755f605e9ffe46187c91b56f18638e3b 100644 (file)
@@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
        locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
 
        if (unlikely(!locked))
-               dev_err(dev, "DSI PLL lock failed\n");
+               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL lock success");
 
@@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
        ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
                                        cached_state->vco_rate, 0);
        if (ret) {
-               dev_err(&pll_28nm->pdev->dev,
+               DRM_DEV_ERROR(&pll_28nm->pdev->dev,
                        "restore vco rate failed. ret=%d\n", ret);
                return ret;
        }
@@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
        ret = of_clk_add_provider(dev->of_node,
                        of_clk_src_onecell_get, &pll_28nm->clk_data);
        if (ret) {
-               dev_err(dev, "failed to register clk provider: %d\n", ret);
+               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
                return ret;
        }
 
@@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
 
        pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
        if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-               dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
 
        ret = pll_28nm_register(pll_28nm);
        if (ret) {
-               dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ERR_PTR(ret);
        }
 
index 0940e84b2821b6df619b901f4c25f2fbf9297828..6a63aba98a3073b3143cf34d37fc36f385cd5c89 100644 (file)
@@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
        edp->bridge = msm_edp_bridge_init(edp);
        if (IS_ERR(edp->bridge)) {
                ret = PTR_ERR(edp->bridge);
-               dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
                edp->bridge = NULL;
                goto fail;
        }
@@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
        edp->connector = msm_edp_connector_init(edp);
        if (IS_ERR(edp->connector)) {
                ret = PTR_ERR(edp->connector);
-               dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
                edp->connector = NULL;
                goto fail;
        }
@@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
        edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (edp->irq < 0) {
                ret = edp->irq;
-               dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
                goto fail;
        }
 
@@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
                        edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                        "edp_isr", edp);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+               DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
                                edp->irq, ret);
                goto fail;
        }
index 23670907a29d2420f92fdab966cf97fb7b23129d..f0725761b327f1c25faf9b4540877d4fa4ddb44e 100644 (file)
@@ -98,7 +98,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
 
        phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
        if (!phy_node) {
-               dev_err(&pdev->dev, "cannot find phy device\n");
+               DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
                return -ENXIO;
        }
 
@@ -109,7 +109,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
        of_node_put(phy_node);
 
        if (!phy_pdev || !hdmi->phy) {
-               dev_err(&pdev->dev, "phy driver is not ready\n");
+               DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
                return -EPROBE_DEFER;
        }
 
@@ -153,7 +153,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
        hdmi->qfprom_mmio = msm_ioremap(pdev,
                config->qfprom_mmio_name, "HDMI_QFPROM");
        if (IS_ERR(hdmi->qfprom_mmio)) {
-               dev_info(&pdev->dev, "can't find qfprom resource\n");
+               DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
                hdmi->qfprom_mmio = NULL;
        }
 
@@ -172,7 +172,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
                                config->hpd_reg_names[i]);
                if (IS_ERR(reg)) {
                        ret = PTR_ERR(reg);
-                       dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
                                        config->hpd_reg_names[i], ret);
                        goto fail;
                }
@@ -195,7 +195,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
                                config->pwr_reg_names[i]);
                if (IS_ERR(reg)) {
                        ret = PTR_ERR(reg);
-                       dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
                                        config->pwr_reg_names[i], ret);
                        goto fail;
                }
@@ -217,7 +217,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
                clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
                if (IS_ERR(clk)) {
                        ret = PTR_ERR(clk);
-                       dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+                       DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
                                        config->hpd_clk_names[i], ret);
                        goto fail;
                }
@@ -239,7 +239,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
                clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
                if (IS_ERR(clk)) {
                        ret = PTR_ERR(clk);
-                       dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+                       DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
                                        config->pwr_clk_names[i], ret);
                        goto fail;
                }
@@ -254,14 +254,14 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
        hdmi->i2c = msm_hdmi_i2c_init(hdmi);
        if (IS_ERR(hdmi->i2c)) {
                ret = PTR_ERR(hdmi->i2c);
-               dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
+               DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
                hdmi->i2c = NULL;
                goto fail;
        }
 
        ret = msm_hdmi_get_phy(hdmi);
        if (ret) {
-               dev_err(&pdev->dev, "failed to get phy\n");
+               DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
                goto fail;
        }
 
@@ -303,7 +303,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
        hdmi->bridge = msm_hdmi_bridge_init(hdmi);
        if (IS_ERR(hdmi->bridge)) {
                ret = PTR_ERR(hdmi->bridge);
-               dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
                hdmi->bridge = NULL;
                goto fail;
        }
@@ -311,7 +311,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
        hdmi->connector = msm_hdmi_connector_init(hdmi);
        if (IS_ERR(hdmi->connector)) {
                ret = PTR_ERR(hdmi->connector);
-               dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
                hdmi->connector = NULL;
                goto fail;
        }
@@ -319,7 +319,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
        hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (hdmi->irq < 0) {
                ret = hdmi->irq;
-               dev_err(dev->dev, "failed to get irq: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }
 
@@ -327,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
                        msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                        "hdmi_isr", hdmi);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+               DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
                                hdmi->irq, ret);
                goto fail;
        }
@@ -476,7 +476,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
        unsigned int level_shift  = 0; /* 0dB */
        bool down_mix = false;
 
-       dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+       DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
                 params->sample_width, params->cea.channels);
 
        switch (params->cea.channels) {
@@ -527,7 +527,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
                rate = HDMI_SAMPLE_RATE_192KHZ;
                break;
        default:
-               dev_err(dev, "rate[%d] not supported!\n",
+               DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
                        params->sample_rate);
                return -EINVAL;
        }
@@ -579,7 +579,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
        hdmi_cfg = (struct hdmi_platform_config *)
                        of_device_get_match_data(dev);
        if (!hdmi_cfg) {
-               dev_err(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
+               DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
                return -ENXIO;
        }
 
index 7e357077ed2634dadb2436c6c5abfb0d6625583b..98d61c690260f49a09228c75ee325738ed484aa5 100644 (file)
@@ -40,7 +40,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
        for (i = 0; i < config->pwr_reg_cnt; i++) {
                ret = regulator_enable(hdmi->pwr_regs[i]);
                if (ret) {
-                       dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
                                        config->pwr_reg_names[i], ret);
                }
        }
@@ -49,7 +49,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
                DBG("pixclock: %lu", hdmi->pixclock);
                ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
                if (ret) {
-                       dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
                                        config->pwr_clk_names[0], ret);
                }
        }
@@ -57,7 +57,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
        for (i = 0; i < config->pwr_clk_cnt; i++) {
                ret = clk_prepare_enable(hdmi->pwr_clks[i]);
                if (ret) {
-                       dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
                                        config->pwr_clk_names[i], ret);
                }
        }
@@ -82,7 +82,7 @@ static void power_off(struct drm_bridge *bridge)
        for (i = 0; i < config->pwr_reg_cnt; i++) {
                ret = regulator_disable(hdmi->pwr_regs[i]);
                if (ret) {
-                       dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
                                        config->pwr_reg_names[i], ret);
                }
        }
@@ -105,7 +105,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
 
        len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (len < 0) {
-               dev_err(&hdmi->pdev->dev,
+               DRM_DEV_ERROR(&hdmi->pdev->dev,
                        "failed to configure avi infoframe\n");
                return;
        }
index e9c9a0af508e8c41bc12e91fc13d2f23b5041f33..99f2f10382586eb6317576dfb4f99114f7620f0d 100644 (file)
@@ -90,7 +90,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
                        if (gpio.num != -1) {
                                ret = gpio_request(gpio.num, gpio.label);
                                if (ret) {
-                                       dev_err(dev,
+                                       DRM_DEV_ERROR(dev,
                                                "'%s'(%d) gpio_request failed: %d\n",
                                                gpio.label, gpio.num, ret);
                                        goto err;
@@ -156,7 +156,7 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
 
                        ret = clk_prepare_enable(hdmi->hpd_clks[i]);
                        if (ret) {
-                               dev_err(dev,
+                               DRM_DEV_ERROR(dev,
                                        "failed to enable hpd clk: %s (%d)\n",
                                        config->hpd_clk_names[i], ret);
                        }
@@ -179,7 +179,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
        for (i = 0; i < config->hpd_reg_cnt; i++) {
                ret = regulator_enable(hdmi->hpd_regs[i]);
                if (ret) {
-                       dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
                                        config->hpd_reg_names[i], ret);
                        goto fail;
                }
@@ -187,13 +187,13 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
 
        ret = pinctrl_pm_select_default_state(dev);
        if (ret) {
-               dev_err(dev, "pinctrl state chg failed: %d\n", ret);
+               DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
                goto fail;
        }
 
        ret = gpio_config(hdmi, true);
        if (ret) {
-               dev_err(dev, "failed to configure GPIOs: %d\n", ret);
+               DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
                goto fail;
        }
 
index 73e20219d431a78c4ec9d3f335c430efbf2c49af..25d2fe2c60e8ad701fa06e31156f1f4b93e66334 100644 (file)
@@ -66,7 +66,7 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
        } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
 
        if (!retry) {
-               dev_err(dev->dev, "timeout waiting for DDC\n");
+               DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
                return -ETIMEDOUT;
        }
 
index 4157722d6b4dc897bd46cda2ce27436b2caa368b..1f4331ed69bd6a4938bce6229e0ca18df394b095 100644 (file)
@@ -37,7 +37,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
                reg = devm_regulator_get(dev, cfg->reg_names[i]);
                if (IS_ERR(reg)) {
                        ret = PTR_ERR(reg);
-                       dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
                                cfg->reg_names[i], ret);
                        return ret;
                }
@@ -51,7 +51,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
                clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
                if (IS_ERR(clk)) {
                        ret = PTR_ERR(clk);
-                       dev_err(dev, "failed to get phy clock: %s (%d)\n",
+                       DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
                                cfg->clk_names[i], ret);
                        return ret;
                }
@@ -73,14 +73,14 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
        for (i = 0; i < cfg->num_regs; i++) {
                ret = regulator_enable(phy->regs[i]);
                if (ret)
-                       dev_err(dev, "failed to enable regulator: %s (%d)\n",
+                       DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
                                cfg->reg_names[i], ret);
        }
 
        for (i = 0; i < cfg->num_clks; i++) {
                ret = clk_prepare_enable(phy->clks[i]);
                if (ret)
-                       dev_err(dev, "failed to enable clock: %s (%d)\n",
+                       DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
                                cfg->clk_names[i], ret);
        }
 
@@ -159,7 +159,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
 
        phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
        if (IS_ERR(phy->mmio)) {
-               dev_err(dev, "%s: failed to map phy base\n", __func__);
+               DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
                return -ENOMEM;
        }
 
@@ -177,7 +177,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
 
        ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
        if (ret) {
-               dev_err(dev, "couldn't init PLL\n");
+               DRM_DEV_ERROR(dev, "couldn't init PLL\n");
                msm_hdmi_phy_resource_disable(phy);
                return ret;
        }
index 0df504c61833c5e614a85698cab31a0c8027fe70..318708f26731e3b1540b3f0fef2039f2e54a96bf 100644 (file)
@@ -725,7 +725,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 
        pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
        if (IS_ERR(pll->mmio_qserdes_com)) {
-               dev_err(dev, "failed to map pll base\n");
+               DRM_DEV_ERROR(dev, "failed to map pll base\n");
                return -ENOMEM;
        }
 
@@ -737,7 +737,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 
                pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
                if (IS_ERR(pll->mmio_qserdes_tx[i])) {
-                       dev_err(dev, "failed to map pll base\n");
+                       DRM_DEV_ERROR(dev, "failed to map pll base\n");
                        return -ENOMEM;
                }
        }
@@ -745,7 +745,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 
        clk = devm_clk_register(dev, &pll->clk_hw);
        if (IS_ERR(clk)) {
-               dev_err(dev, "failed to register pll clock\n");
+               DRM_DEV_ERROR(dev, "failed to register pll clock\n");
                return -EINVAL;
        }
 
index 99590758c68b7cf8e296928db1bab5ce5b2df4c4..c6dae6e437f97460232df84d9ae0a499d343f30c 100644 (file)
@@ -445,7 +445,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
 
        pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
        if (IS_ERR(pll->mmio)) {
-               dev_err(dev, "failed to map pll base\n");
+               DRM_DEV_ERROR(dev, "failed to map pll base\n");
                return -ENOMEM;
        }
 
@@ -454,7 +454,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
 
        clk = devm_clk_register(dev, &pll->clk_hw);
        if (IS_ERR(clk)) {
-               dev_err(dev, "failed to register pll clock\n");
+               DRM_DEV_ERROR(dev, "failed to register pll clock\n");
                return -EINVAL;
        }
 
index 4bcdeca7479db6a2481c2b6e8ad38defe7f280de..ff291dd0744b34d545012678d62e8f0e0b846f6b 100644 (file)
@@ -78,7 +78,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
                kms->funcs->commit(kms, state);
        }
 
-       msm_atomic_wait_for_commit_done(dev, state);
+       if (!state->legacy_cursor_update)
+               msm_atomic_wait_for_commit_done(dev, state);
 
        kms->funcs->complete_commit(kms, state);
 
index f0da0d3c8a80f7cf9ab5082095aed6df3e3c9529..42a2cba789983272b24f0bd30f5138ba898bfc8d 100644 (file)
@@ -194,13 +194,13 @@ static int late_init_minor(struct drm_minor *minor)
 
        ret = msm_rd_debugfs_init(minor);
        if (ret) {
-               dev_err(minor->dev->dev, "could not install rd debugfs\n");
+               DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
                return ret;
        }
 
        ret = msm_perf_debugfs_init(minor);
        if (ret) {
-               dev_err(minor->dev->dev, "could not install perf debugfs\n");
+               DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
                return ret;
        }
 
@@ -228,14 +228,14 @@ int msm_debugfs_init(struct drm_minor *minor)
                        minor->debugfs_root, minor);
 
        if (ret) {
-               dev_err(dev->dev, "could not install msm_debugfs_list\n");
+               DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
                return ret;
        }
 
        debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
                dev, &msm_gpu_fops);
 
-       if (priv->kms->funcs->debugfs_init) {
+       if (priv->kms && priv->kms->funcs->debugfs_init) {
                ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
                if (ret)
                        return ret;
index 5e758d95751a6f908a5cd1079c7ec1f81ad3b44e..6265be8f4119304c5a2799775f0274e8bfc3b3a6 100644 (file)
 #include "msm_drv.h"
 #include "msm_debugfs.h"
 #include "msm_fence.h"
+#include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
+#include "adreno/adreno_gpu.h"
 
 
 /*
  * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
  *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
  *           MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ *           GEM object's debug name
  */
 #define MSM_VERSION_MAJOR      1
-#define MSM_VERSION_MINOR      3
+#define MSM_VERSION_MINOR      4
 #define MSM_VERSION_PATCHLEVEL 0
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -170,7 +174,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
        if (!res) {
-               dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+               DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -178,7 +182,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 
        ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
        if (!ptr) {
-               dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+               DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -358,6 +362,14 @@ static int get_mdp_ver(struct platform_device *pdev)
 
 #include <linux/of_address.h>
 
+bool msm_use_mmu(struct drm_device *dev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+
+       /* a2xx comes with its own MMU */
+       return priv->is_a2xx || iommu_present(&platform_bus_type);
+}
+
 static int msm_init_vram(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -396,7 +408,7 @@ static int msm_init_vram(struct drm_device *dev)
                 * Grab the entire CMA chunk carved out in early startup in
                 * mach-msm:
                 */
-       } else if (!iommu_present(&platform_bus_type)) {
+       } else if (!msm_use_mmu(dev)) {
                DRM_INFO("using %s VRAM carveout\n", vram);
                size = memparse(vram, NULL);
        }
@@ -419,12 +431,12 @@ static int msm_init_vram(struct drm_device *dev)
                p = dma_alloc_attrs(dev->dev, size,
                                &priv->vram.paddr, GFP_KERNEL, attrs);
                if (!p) {
-                       dev_err(dev->dev, "failed to allocate VRAM\n");
+                       DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
                        priv->vram.paddr = 0;
                        return -ENOMEM;
                }
 
-               dev_info(dev->dev, "VRAM: %08x->%08x\n",
+               DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
                                (uint32_t)priv->vram.paddr,
                                (uint32_t)(priv->vram.paddr + size));
        }
@@ -444,7 +456,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
        ddev = drm_dev_alloc(drv, dev);
        if (IS_ERR(ddev)) {
-               dev_err(dev, "failed to allocate drm_device\n");
+               DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
                return PTR_ERR(ddev);
        }
 
@@ -508,19 +520,16 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                priv->kms = kms;
                break;
        default:
-               kms = ERR_PTR(-ENODEV);
+               /* valid only for the dummy headless case, where of_node=NULL */
+               WARN_ON(dev->of_node);
+               kms = NULL;
                break;
        }
 
        if (IS_ERR(kms)) {
-               /*
-                * NOTE: once we have GPU support, having no kms should not
-                * be considered fatal.. ideally we would still support gpu
-                * and (for example) use dmabuf/prime to share buffers with
-                * imx drm driver on iMX5
-                */
-               dev_err(dev, "failed to load kms\n");
+               DRM_DEV_ERROR(dev, "failed to load kms\n");
                ret = PTR_ERR(kms);
+               priv->kms = NULL;
                goto err_msm_uninit;
        }
 
@@ -530,7 +539,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        if (kms) {
                ret = kms->funcs->hw_init(kms);
                if (ret) {
-                       dev_err(dev, "kms hw init failed: %d\n", ret);
+                       DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
                        goto err_msm_uninit;
                }
        }
@@ -561,7 +570,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                                                                        ret);
 
                if (IS_ERR(priv->disp_thread[i].thread)) {
-                       dev_err(dev, "failed to create crtc_commit kthread\n");
+                       DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
                        priv->disp_thread[i].thread = NULL;
                }
 
@@ -573,6 +582,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                        kthread_run(kthread_worker_fn,
                                &priv->event_thread[i].worker,
                                "crtc_event:%d", priv->event_thread[i].crtc_id);
+
                /**
                 * event thread should also run at same priority as disp_thread
                 * because it is handling frame_done events. A lower priority
@@ -613,7 +623,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
        ret = drm_vblank_init(ddev, priv->num_crtcs);
        if (ret < 0) {
-               dev_err(dev, "failed to initialize vblank\n");
+               DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
                goto err_msm_uninit;
        }
 
@@ -622,7 +632,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                ret = drm_irq_install(ddev, kms->irq);
                pm_runtime_put_sync(dev);
                if (ret < 0) {
-                       dev_err(dev, "failed to install IRQ handler\n");
+                       DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
                        goto err_msm_uninit;
                }
        }
@@ -634,7 +644,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       if (fbdev)
+       if (kms && fbdev)
                priv->fbdev = msm_fbdev_init(ddev);
 #endif
 
@@ -742,7 +752,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
-       return kms->funcs->irq_postinstall(kms);
+
+       if (kms->funcs->irq_postinstall)
+               return kms->funcs->irq_postinstall(kms);
+
+       return 0;
 }
 
 static void msm_irq_uninstall(struct drm_device *dev)
@@ -809,7 +823,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
        }
 
        return msm_gem_new_handle(dev, file, args->size,
-                       args->flags, &args->handle);
+                       args->flags, &args->handle, NULL);
 }
 
 static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -867,6 +881,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
        if (!priv->gpu)
                return -EINVAL;
 
+       /*
+        * Don't pin the memory here - just get an address so that userspace can
+        * be productive
+        */
        return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
 }
 
@@ -875,23 +893,66 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
 {
        struct drm_msm_gem_info *args = data;
        struct drm_gem_object *obj;
-       int ret = 0;
+       struct msm_gem_object *msm_obj;
+       int i, ret = 0;
 
-       if (args->flags & ~MSM_INFO_FLAGS)
+       if (args->pad)
                return -EINVAL;
 
+       switch (args->info) {
+       case MSM_INFO_GET_OFFSET:
+       case MSM_INFO_GET_IOVA:
+               /* value returned as immediate, not pointer, so len==0: */
+               if (args->len)
+                       return -EINVAL;
+               break;
+       case MSM_INFO_SET_NAME:
+       case MSM_INFO_GET_NAME:
+               break;
+       default:
+               return -EINVAL;
+       }
+
        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;
 
-       if (args->flags & MSM_INFO_IOVA) {
-               uint64_t iova;
+       msm_obj = to_msm_bo(obj);
 
-               ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
-               if (!ret)
-                       args->offset = iova;
-       } else {
-               args->offset = msm_gem_mmap_offset(obj);
+       switch (args->info) {
+       case MSM_INFO_GET_OFFSET:
+               args->value = msm_gem_mmap_offset(obj);
+               break;
+       case MSM_INFO_GET_IOVA:
+               ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+               break;
+       case MSM_INFO_SET_NAME:
+               /* length check should leave room for terminating null: */
+               if (args->len >= sizeof(msm_obj->name)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               ret = copy_from_user(msm_obj->name,
+                       u64_to_user_ptr(args->value), args->len);
+               msm_obj->name[args->len] = '\0';
+               for (i = 0; i < args->len; i++) {
+                       if (!isprint(msm_obj->name[i])) {
+                               msm_obj->name[i] = '\0';
+                               break;
+                       }
+               }
+               break;
+       case MSM_INFO_GET_NAME:
+               if (args->value && (args->len < strlen(msm_obj->name))) {
+                       ret = -EINVAL;
+                       break;
+               }
+               args->len = strlen(msm_obj->name);
+               if (args->value) {
+                       ret = copy_to_user(u64_to_user_ptr(args->value),
+                                       msm_obj->name, args->len);
+               }
+               break;
        }
 
        drm_gem_object_put_unlocked(obj);
@@ -1070,18 +1131,15 @@ static int msm_pm_suspend(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
-       struct msm_kms *kms = priv->kms;
 
-       /* TODO: Use atomic helper suspend/resume */
-       if (kms && kms->funcs && kms->funcs->pm_suspend)
-               return kms->funcs->pm_suspend(dev);
-
-       drm_kms_helper_poll_disable(ddev);
+       if (WARN_ON(priv->pm_state))
+               drm_atomic_state_put(priv->pm_state);
 
        priv->pm_state = drm_atomic_helper_suspend(ddev);
        if (IS_ERR(priv->pm_state)) {
-               drm_kms_helper_poll_enable(ddev);
-               return PTR_ERR(priv->pm_state);
+               int ret = PTR_ERR(priv->pm_state);
+               DRM_ERROR("Failed to suspend dpu, %d\n", ret);
+               return ret;
        }
 
        return 0;
@@ -1091,16 +1149,16 @@ static int msm_pm_resume(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct msm_drm_private *priv = ddev->dev_private;
-       struct msm_kms *kms = priv->kms;
+       int ret;
 
-       /* TODO: Use atomic helper suspend/resume */
-       if (kms && kms->funcs && kms->funcs->pm_resume)
-               return kms->funcs->pm_resume(dev);
+       if (WARN_ON(!priv->pm_state))
+               return -ENOENT;
 
-       drm_atomic_helper_resume(ddev, priv->pm_state);
-       drm_kms_helper_poll_enable(ddev);
+       ret = drm_atomic_helper_resume(ddev, priv->pm_state);
+       if (!ret)
+               priv->pm_state = NULL;
 
-       return 0;
+       return ret;
 }
 #endif
 
@@ -1185,7 +1243,7 @@ static int add_components_mdp(struct device *mdp_dev,
 
                ret = of_graph_parse_endpoint(ep_node, &ep);
                if (ret) {
-                       dev_err(mdp_dev, "unable to parse port endpoint\n");
+                       DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
                        of_node_put(ep_node);
                        return ret;
                }
@@ -1207,8 +1265,10 @@ static int add_components_mdp(struct device *mdp_dev,
                if (!intf)
                        continue;
 
-               drm_of_component_match_add(master_dev, matchptr, compare_of,
-                                          intf);
+               if (of_device_is_available(intf))
+                       drm_of_component_match_add(master_dev, matchptr,
+                                                  compare_of, intf);
+
                of_node_put(intf);
        }
 
@@ -1236,13 +1296,13 @@ static int add_display_components(struct device *dev,
            of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
                if (ret) {
-                       dev_err(dev, "failed to populate children devices\n");
+                       DRM_DEV_ERROR(dev, "failed to populate children devices\n");
                        return ret;
                }
 
                mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
                if (!mdp_dev) {
-                       dev_err(dev, "failed to find MDSS MDP node\n");
+                       DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
                        of_platform_depopulate(dev);
                        return -ENODEV;
                }
@@ -1272,6 +1332,7 @@ static int add_display_components(struct device *dev,
 static const struct of_device_id msm_gpu_match[] = {
        { .compatible = "qcom,adreno" },
        { .compatible = "qcom,adreno-3xx" },
+       { .compatible = "amd,imageon" },
        { .compatible = "qcom,kgsl-3d0" },
        { },
 };
@@ -1316,9 +1377,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
        struct component_match *match = NULL;
        int ret;
 
-       ret = add_display_components(&pdev->dev, &match);
-       if (ret)
-               return ret;
+       if (get_mdp_ver(pdev)) {
+               ret = add_display_components(&pdev->dev, &match);
+               if (ret)
+                       return ret;
+       }
 
        ret = add_gpu_components(&pdev->dev, &match);
        if (ret)
index 9d11f321f5a9286c5b046c8fc46f04622ed22d2c..9cd6a96c6bf2a522d413681f20d918753921f554 100644 (file)
@@ -179,6 +179,8 @@ struct msm_drm_private {
        /* when we have more than one 'msm_gpu' these need to be an array: */
        struct msm_gpu *gpu;
        struct msm_file_private *lastctx;
+       /* gpu is only set on open(), but we need this info earlier */
+       bool is_a2xx;
 
        struct drm_fb_helper *fbdev;
 
@@ -241,10 +243,16 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-               struct msm_gem_vma *vma, struct sg_table *sgt);
+               struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma);
 
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
@@ -252,9 +260,15 @@ struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
                const char *name);
 
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+               const char *name, uint64_t va_start, uint64_t va_end);
+
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
+bool msm_use_mmu(struct drm_device *dev);
+
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
@@ -269,12 +283,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova);
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
-               struct msm_gem_address_space *aspace);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -301,7 +317,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-               uint32_t size, uint32_t flags, uint32_t *handle);
+               uint32_t size, uint32_t flags, uint32_t *handle, char *name);
 struct drm_gem_object *msm_gem_new(struct drm_device *dev,
                uint32_t size, uint32_t flags);
 struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -312,9 +328,13 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
 void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
                uint32_t flags, struct msm_gem_address_space *aspace,
                struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+               struct msm_gem_address_space *aspace, bool locked);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt);
 
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
                struct msm_gem_address_space *aspace);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
index 2a7348aeb38d1a785c79b874a4d6fef9bb080017..67dfd8d3dc12caaaf9c17c7e5c3d6dbc344bb696 100644 (file)
@@ -66,7 +66,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
        uint64_t iova;
 
        for (i = 0; i < n; i++) {
-               ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
+               ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
                DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
                if (ret)
                        return ret;
@@ -81,7 +81,7 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
        int i, n = fb->format->num_planes;
 
        for (i = 0; i < n; i++)
-               msm_gem_put_iova(fb->obj[i], aspace);
+               msm_gem_unpin_iova(fb->obj[i], aspace);
 }
 
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
@@ -154,7 +154,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
        format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
                        mode_cmd->modifier[0]);
        if (!format) {
-               dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+               DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
                                (char *)&mode_cmd->pixel_format);
                ret = -EINVAL;
                goto fail;
@@ -196,7 +196,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
        ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
        if (ret) {
-               dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
                goto fail;
        }
 
@@ -233,13 +233,15 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
                bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
        }
        if (IS_ERR(bo)) {
-               dev_err(dev->dev, "failed to allocate buffer object\n");
+               DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
                return ERR_CAST(bo);
        }
 
+       msm_gem_object_set_name(bo, "stolenfb");
+
        fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
        if (IS_ERR(fb)) {
-               dev_err(dev->dev, "failed to allocate fb\n");
+               DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
                /* note: if fb creation failed, we can't rely on fb destroy
                 * to unref the bo:
                 */
index 456622b4633558b7d0d98a1d57729aac9d33c57f..c03e860ba737f8d56d0759c239b416086c66fb55 100644 (file)
@@ -91,7 +91,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
                        sizes->surface_height, pitch, format);
 
        if (IS_ERR(fb)) {
-               dev_err(dev->dev, "failed to allocate fb\n");
+               DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
                return PTR_ERR(fb);
        }
 
@@ -104,15 +104,15 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
         * in panic (ie. lock-safe, etc) we could avoid pinning the
         * buffer now:
         */
-       ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
+       ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
        if (ret) {
-               dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
                goto fail_unlock;
        }
 
        fbi = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(fbi)) {
-               dev_err(dev->dev, "failed to allocate fb info\n");
+               DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
                ret = PTR_ERR(fbi);
                goto fail_unlock;
        }
@@ -176,7 +176,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
 
        ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
        if (ret) {
-               dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+               DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
                goto fail;
        }
 
index f59ca27a4a357492f96d0b7e37c76536037b40d6..51a95da694d8d498dee29bd91ddb880b3478c356 100644 (file)
@@ -88,7 +88,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
                        p = get_pages_vram(obj, npages);
 
                if (IS_ERR(p)) {
-                       dev_err(dev->dev, "could not get pages: %ld\n",
+                       DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
                                        PTR_ERR(p));
                        return p;
                }
@@ -99,7 +99,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);
 
-                       dev_err(dev->dev, "failed to allocate sgt\n");
+                       DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
                        msm_obj->sgt = NULL;
                        return ptr;
                }
@@ -280,7 +280,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
        ret = drm_gem_create_mmap_offset(obj);
 
        if (ret) {
-               dev_err(dev->dev, "could not allocate mmap offset\n");
+               DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
                return 0;
        }
 
@@ -352,63 +352,104 @@ put_iova(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
        list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-               msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+               msm_gem_purge_vma(vma->aspace, vma);
+               msm_gem_close_vma(vma->aspace, vma);
                del_vma(vma);
        }
 }
 
-/* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
        int ret = 0;
 
-       mutex_lock(&msm_obj->lock);
-
-       if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-               mutex_unlock(&msm_obj->lock);
-               return -EBUSY;
-       }
+       WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
        vma = lookup_vma(obj, aspace);
 
        if (!vma) {
-               struct page **pages;
-
                vma = add_vma(obj, aspace);
-               if (IS_ERR(vma)) {
-                       ret = PTR_ERR(vma);
-                       goto unlock;
-               }
+               if (IS_ERR(vma))
+                       return PTR_ERR(vma);
 
-               pages = get_pages(obj);
-               if (IS_ERR(pages)) {
-                       ret = PTR_ERR(pages);
-                       goto fail;
+               ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+               if (ret) {
+                       del_vma(vma);
+                       return ret;
                }
-
-               ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-                               obj->size >> PAGE_SHIFT);
-               if (ret)
-                       goto fail;
        }
 
        *iova = vma->iova;
+       return 0;
+}
+
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_gem_vma *vma;
+       struct page **pages;
+
+       WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+       if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+               return -EBUSY;
+
+       vma = lookup_vma(obj, aspace);
+       if (WARN_ON(!vma))
+               return -EINVAL;
+
+       pages = get_pages(obj);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+                       obj->size >> PAGE_SHIFT);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       u64 local;
+       int ret;
+
+       mutex_lock(&msm_obj->lock);
+
+       ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+       if (!ret)
+               ret = msm_gem_pin_iova(obj, aspace);
+
+       if (!ret)
+               *iova = local;
 
        mutex_unlock(&msm_obj->lock);
-       return 0;
+       return ret;
+}
 
-fail:
-       del_vma(vma);
-unlock:
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+               struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int ret;
+
+       mutex_lock(&msm_obj->lock);
+       ret = msm_gem_get_iova_locked(obj, aspace, iova);
        mutex_unlock(&msm_obj->lock);
+
        return ret;
 }
 
 /* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
  */
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
@@ -424,15 +465,24 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
        return vma ? vma->iova : 0;
 }
 
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin a iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
 {
-       // XXX TODO ..
-       // NOTE: probably don't need a _locked() version.. we wouldn't
-       // normally unmap here, but instead just mark that it could be
-       // unmapped (if the iova refcnt drops to zero), but then later
-       // if another _get_iova_locked() fails we can start unmapping
-       // things that are no longer needed..
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_gem_vma *vma;
+
+       mutex_lock(&msm_obj->lock);
+       vma = lookup_vma(obj, aspace);
+
+       if (!WARN_ON(!vma))
+               msm_gem_unmap_vma(aspace, vma);
+
+       mutex_unlock(&msm_obj->lock);
 }
 
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -441,7 +491,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
        args->pitch = align_pitch(args->width, args->bpp);
        args->size  = PAGE_ALIGN(args->pitch * args->height);
        return msm_gem_new_handle(dev, file, args->size,
-                       MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+                       MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 }
 
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -473,7 +523,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
        mutex_lock(&msm_obj->lock);
 
        if (WARN_ON(msm_obj->madv > madv)) {
-               dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+               DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
                        msm_obj->madv, madv);
                mutex_unlock(&msm_obj->lock);
                return ERR_PTR(-EBUSY);
@@ -739,16 +789,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
                break;
        }
 
-       seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+       seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, msm_obj->vaddr);
 
-       /* FIXME: we need to print the address space here too */
-       list_for_each_entry(vma, &msm_obj->vmas, list)
-               seq_printf(m, " %08llx", vma->iova);
+       seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+       if (!list_empty(&msm_obj->vmas)) {
+
+               seq_puts(m, "      vmas:");
 
-       seq_printf(m, " %zu%s\n", obj->size, madv);
+               list_for_each_entry(vma, &msm_obj->vmas, list)
+                       seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+                               vma->iova, vma->mapped ? "mapped" : "unmapped",
+                               vma->inuse);
+
+               seq_puts(m, "\n");
+       }
 
        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
@@ -775,9 +833,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
        int count = 0;
        size_t size = 0;
 
+       seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
        list_for_each_entry(msm_obj, list, mm_list) {
                struct drm_gem_object *obj = &msm_obj->base;
-               seq_printf(m, "   ");
+               seq_puts(m, "   ");
                msm_gem_describe(obj, m);
                count++;
                size += obj->size;
@@ -831,7 +890,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
 /* convenience method to construct a GEM buffer object, and userspace handle */
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-               uint32_t size, uint32_t flags, uint32_t *handle)
+               uint32_t size, uint32_t flags, uint32_t *handle,
+               char *name)
 {
        struct drm_gem_object *obj;
        int ret;
@@ -841,6 +901,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
+       if (name)
+               msm_gem_object_set_name(obj, "%s", name);
+
        ret = drm_gem_handle_create(file, obj, handle);
 
        /* drop reference from allocate - handle holds it now */
@@ -864,7 +927,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
        case MSM_BO_WC:
                break;
        default:
-               dev_err(dev->dev, "invalid cache flag: %x\n",
+               DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
                                (flags & MSM_BO_CACHE_MASK));
                return -EINVAL;
        }
@@ -912,9 +975,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
        size = PAGE_ALIGN(size);
 
-       if (!iommu_present(&platform_bus_type))
+       if (!msm_use_mmu(dev))
                use_vram = true;
-       else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+       else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
                use_vram = true;
 
        if (WARN_ON(use_vram && !priv->vram.size))
@@ -989,8 +1052,8 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        int ret, npages;
 
        /* if we don't have IOMMU, don't bother pretending we can import: */
-       if (!iommu_present(&platform_bus_type)) {
-               dev_err(dev->dev, "cannot import without IOMMU\n");
+       if (!msm_use_mmu(dev)) {
+               DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
                return ERR_PTR(-EINVAL);
        }
 
@@ -1040,24 +1103,30 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
                return ERR_CAST(obj);
 
        if (iova) {
-               ret = msm_gem_get_iova(obj, aspace, iova);
-               if (ret) {
-                       drm_gem_object_put(obj);
-                       return ERR_PTR(ret);
-               }
+               ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+               if (ret)
+                       goto err;
        }
 
        vaddr = msm_gem_get_vaddr(obj);
        if (IS_ERR(vaddr)) {
-               msm_gem_put_iova(obj, aspace);
-               drm_gem_object_put(obj);
-               return ERR_CAST(vaddr);
+               msm_gem_unpin_iova(obj, aspace);
+               ret = PTR_ERR(vaddr);
+               goto err;
        }
 
        if (bo)
                *bo = obj;
 
        return vaddr;
+err:
+       if (locked)
+               drm_gem_object_put(obj);
+       else
+               drm_gem_object_put_unlocked(obj);
+
+       return ERR_PTR(ret);
+
 }
 
 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1073,3 +1142,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
 {
        return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
 }
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+               struct msm_gem_address_space *aspace, bool locked)
+{
+       if (IS_ERR_OR_NULL(bo))
+               return;
+
+       msm_gem_put_vaddr(bo);
+       msm_gem_unpin_iova(bo, aspace);
+
+       if (locked)
+               drm_gem_object_put(bo);
+       else
+               drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(bo);
+       va_list ap;
+
+       if (!fmt)
+               return;
+
+       va_start(ap, fmt);
+       vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+       va_end(ap);
+}
index c5d9bd3e47a8d20100f4c29da961aba368fb8752..2064fac871b8c8040a6ff129996952471f7ec0d4 100644 (file)
@@ -41,6 +41,8 @@ struct msm_gem_vma {
        uint64_t iova;
        struct msm_gem_address_space *aspace;
        struct list_head list;    /* node in msm_gem_object::vmas */
+       bool mapped;
+       int inuse;
 };
 
 struct msm_gem_object {
@@ -91,6 +93,8 @@ struct msm_gem_object {
         */
        struct drm_mm_node *vram_node;
        struct mutex lock; /* Protects resources associated with bo */
+
+       char name[32]; /* Identifier to print for the debugfs files */
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
@@ -150,6 +154,7 @@ struct msm_gem_submit {
        struct msm_ringbuffer *ring;
        unsigned int nr_cmds;
        unsigned int nr_bos;
+       u32 ident;         /* A "identifier" for the submit for logging */
        struct {
                uint32_t type;
                uint32_t size;  /* in dwords */
index a90aedd6883a8692bc68d5353b17049e4f1a3faf..3cbed4acb0f4edb8825db3410d3be58bb1c592a9 100644 (file)
@@ -20,6 +20,7 @@
 #include "msm_drv.h"
 #include "msm_gpu.h"
 #include "msm_gem.h"
+#include "msm_gpu_trace.h"
 
 /*
  * Cmdstream submission:
@@ -48,7 +49,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
        submit->dev = dev;
        submit->gpu = gpu;
        submit->fence = NULL;
-       submit->pid = get_pid(task_pid(current));
        submit->cmd = (void *)&submit->bos[nr_bos];
        submit->queue = queue;
        submit->ring = gpu->rb[queue->prio];
@@ -114,8 +114,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                        pagefault_disable();
                }
 
+/* at least one of READ and/or WRITE flags should be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
                if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
-                       !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+                       !(submit_bo.flags & MANDATORY_FLAGS)) {
                        DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
@@ -167,7 +170,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
        struct msm_gem_object *msm_obj = submit->bos[i].obj;
 
        if (submit->bos[i].flags & BO_PINNED)
-               msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+               msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
 
        if (submit->bos[i].flags & BO_LOCKED)
                ww_mutex_unlock(&msm_obj->resv->lock);
@@ -270,7 +273,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
                uint64_t iova;
 
                /* if locking succeeded, pin bo: */
-               ret = msm_gem_get_iova(&msm_obj->base,
+               ret = msm_gem_get_and_pin_iova(&msm_obj->base,
                                submit->gpu->aspace, &iova);
 
                if (ret)
@@ -406,6 +409,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file)
 {
+       static atomic_t ident = ATOMIC_INIT(0);
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_gem_submit *args = data;
        struct msm_file_private *ctx = file->driver_priv;
@@ -416,9 +420,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_gpu_submitqueue *queue;
        struct msm_ringbuffer *ring;
        int out_fence_fd = -1;
+       struct pid *pid = get_pid(task_pid(current));
        unsigned i;
-       int ret;
-
+       int ret, submitid;
        if (!gpu)
                return -ENXIO;
 
@@ -441,7 +445,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (!queue)
                return -ENOENT;
 
+       /* Get a unique identifier for the submission for logging purposes */
+       submitid = atomic_inc_return(&ident) - 1;
+
        ring = gpu->rb[queue->prio];
+       trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+               args->nr_bos, args->nr_cmds);
 
        if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
                in_fence = sync_file_get_fence(args->fence_fd);
@@ -478,6 +487,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
+       submit->pid = pid;
+       submit->ident = submitid;
+
        if (args->flags & MSM_SUBMIT_SUDO)
                submit->in_rb = true;
 
index ffbec224551b52f927eebb92b4319134b3399e2c..557360788084eb3db21e0c964722a0ce1923f92b 100644 (file)
@@ -38,20 +38,72 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
                kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-               struct msm_gem_vma *vma, struct sg_table *sgt)
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma)
 {
-       if (!aspace || !vma->iova)
+       unsigned size = vma->node.size << PAGE_SHIFT;
+
+       /* Print a message if we try to purge a vma in use */
+       if (WARN_ON(vma->inuse > 0))
+               return;
+
+       /* Don't do anything if the memory isn't mapped */
+       if (!vma->mapped)
                return;
 
-       if (aspace->mmu) {
-               unsigned size = vma->node.size << PAGE_SHIFT;
-               aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
-       }
+       if (aspace->mmu)
+               aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+       vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma)
+{
+       if (!WARN_ON(!vma->iova))
+               vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+       unsigned size = npages << PAGE_SHIFT;
+       int ret = 0;
+
+       if (WARN_ON(!vma->iova))
+               return -EINVAL;
+
+       /* Increase the usage counter */
+       vma->inuse++;
+
+       if (vma->mapped)
+               return 0;
+
+       vma->mapped = true;
+
+       if (aspace->mmu)
+               ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+                               size, IOMMU_READ | IOMMU_WRITE);
+
+       if (ret)
+               vma->mapped = false;
+
+       return ret;
+}
+
+/* Close an iova.  Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma)
+{
+       if (WARN_ON(vma->inuse > 0 || vma->mapped))
+               return;
 
        spin_lock(&aspace->lock);
-       drm_mm_remove_node(&vma->node);
+       if (vma->iova)
+               drm_mm_remove_node(&vma->node);
        spin_unlock(&aspace->lock);
 
        vma->iova = 0;
@@ -59,18 +111,16 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
        msm_gem_address_space_put(aspace);
 }
 
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
-               struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+               struct msm_gem_vma *vma, int npages)
 {
        int ret;
 
-       spin_lock(&aspace->lock);
-       if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
-               spin_unlock(&aspace->lock);
-               return 0;
-       }
+       if (WARN_ON(vma->iova))
+               return -EBUSY;
 
+       spin_lock(&aspace->lock);
        ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
        spin_unlock(&aspace->lock);
 
@@ -78,19 +128,14 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
                return ret;
 
        vma->iova = vma->node.start << PAGE_SHIFT;
+       vma->mapped = false;
 
-       if (aspace->mmu) {
-               unsigned size = npages << PAGE_SHIFT;
-               ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-                               size, IOMMU_READ | IOMMU_WRITE);
-       }
-
-       /* Get a reference to the aspace to keep it around */
        kref_get(&aspace->kref);
 
-       return ret;
+       return 0;
 }
 
+
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
                const char *name)
@@ -114,3 +159,26 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 
        return aspace;
 }
+
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+               const char *name, uint64_t va_start, uint64_t va_end)
+{
+       struct msm_gem_address_space *aspace;
+       u64 size = va_end - va_start;
+
+       aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+       if (!aspace)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&aspace->lock);
+       aspace->name = name;
+       aspace->mmu = msm_gpummu_new(dev, gpu);
+
+       drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
+               size >> PAGE_SHIFT);
+
+       kref_init(&aspace->kref);
+
+       return aspace;
+}
index 11aac83370664f45ce5c8a39e6bb6b284581ae40..6e079a83bd369899f0daba5817a6dcf4d8c27630 100644 (file)
@@ -19,6 +19,8 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 #include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
 
 #include <generated/utsrelease.h>
 #include <linux/string_helpers.h>
@@ -107,7 +109,7 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
                        &msm_devfreq_profile, "simple_ondemand", NULL);
 
        if (IS_ERR(gpu->devfreq.devfreq)) {
-               dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+               DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
                gpu->devfreq.devfreq = NULL;
        }
 
@@ -122,7 +124,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                if (ret) {
-                       dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
                        return ret;
                }
        }
@@ -130,7 +132,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                if (ret) {
-                       dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+                       DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
                        return ret;
                }
        }
@@ -315,28 +317,28 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
        struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
 
        /* Don't record write only objects */
-
        state_bo->size = obj->base.size;
        state_bo->iova = iova;
 
-       /* Only store the data for buffer objects marked for read */
-       if ((flags & MSM_SUBMIT_BO_READ)) {
+       /* Only store data for non imported buffer objects marked for read */
+       if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
                void *ptr;
 
                state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
                if (!state_bo->data)
-                       return;
+                       goto out;
 
                ptr = msm_gem_get_vaddr_active(&obj->base);
                if (IS_ERR(ptr)) {
                        kvfree(state_bo->data);
-                       return;
+                       state_bo->data = NULL;
+                       goto out;
                }
 
                memcpy(state_bo->data, ptr, obj->base.size);
                msm_gem_put_vaddr(&obj->base);
        }
-
+out:
        state->nr_bos++;
 }
 
@@ -360,12 +362,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
        if (submit) {
                int i;
 
-               state->bos = kcalloc(submit->nr_bos,
+               state->bos = kcalloc(submit->nr_cmds,
                        sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
 
-               for (i = 0; state->bos && i < submit->nr_bos; i++)
-                       msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
-                               submit->bos[i].iova, submit->bos[i].flags);
+               for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+                       int idx = submit->cmd[i].idx;
+
+                       msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+                               submit->bos[idx].iova, submit->bos[idx].flags);
+               }
        }
 
        /* Set the active crash state to be dumped on failure */
@@ -428,7 +433,7 @@ static void recover_worker(struct work_struct *work)
 
        mutex_lock(&dev->struct_mutex);
 
-       dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+       DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
 
        submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
        if (submit) {
@@ -456,7 +461,7 @@ static void recover_worker(struct work_struct *work)
                rcu_read_unlock();
 
                if (comm && cmd) {
-                       dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+                       DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
                                gpu->name, comm, cmd);
 
                        msm_rd_dump_submit(priv->hangrd, submit,
@@ -539,11 +544,11 @@ static void hangcheck_handler(struct timer_list *t)
        } else if (fence < ring->seqno) {
                /* no progress and not done.. hung! */
                ring->hangcheck_fence = fence;
-               dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+               DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
                                gpu->name, ring->id);
-               dev_err(dev->dev, "%s:     completed fence: %u\n",
+               DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
                                gpu->name, fence);
-               dev_err(dev->dev, "%s:     submitted fence: %u\n",
+               DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
                                gpu->name, ring->seqno);
 
                queue_work(priv->wq, &gpu->recover_work);
@@ -659,15 +664,33 @@ out:
  * Cmdstream submission/retirement:
  */
 
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+               struct msm_gem_submit *submit)
 {
+       int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+       volatile struct msm_gpu_submit_stats *stats;
+       u64 elapsed, clock = 0;
        int i;
 
+       stats = &ring->memptrs->stats[index];
+       /* Convert 19.2MHz alwayson ticks to nanoseconds for elapsed time */
+       elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+       do_div(elapsed, 192);
+
+       /* Calculate the clock frequency from the number of CP cycles */
+       if (elapsed) {
+               clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+               do_div(clock, elapsed);
+       }
+
+       trace_msm_gpu_submit_retired(submit, elapsed, clock,
+               stats->alwayson_start, stats->alwayson_end);
+
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                /* move to inactive: */
                msm_gem_move_to_inactive(&msm_obj->base);
-               msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+               msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
                drm_gem_object_put(&msm_obj->base);
        }
 
@@ -690,7 +713,7 @@ static void retire_submits(struct msm_gpu *gpu)
 
                list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
                        if (dma_fence_is_signaled(submit->fence))
-                               retire_submit(gpu, submit);
+                               retire_submit(gpu, ring, submit);
                }
        }
 }
@@ -751,7 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
                /* submit takes a reference to the bo and iova until retired: */
                drm_gem_object_get(&msm_obj->base);
-               msm_gem_get_iova(&msm_obj->base,
+               msm_gem_get_and_pin_iova(&msm_obj->base,
                                submit->gpu->aspace, &iova);
 
                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -800,7 +823,6 @@ static struct msm_gem_address_space *
 msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
                uint64_t va_start, uint64_t va_end)
 {
-       struct iommu_domain *iommu;
        struct msm_gem_address_space *aspace;
        int ret;
 
@@ -809,20 +831,27 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
         * and have separate page tables per context.  For now, to keep things
         * simple and to get something working, just use a single address space:
         */
-       iommu = iommu_domain_alloc(&platform_bus_type);
-       if (!iommu)
-               return NULL;
-
-       iommu->geometry.aperture_start = va_start;
-       iommu->geometry.aperture_end = va_end;
-
-       dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+       if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
+               struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+               if (!iommu)
+                       return NULL;
+
+               iommu->geometry.aperture_start = va_start;
+               iommu->geometry.aperture_end = va_end;
+
+               DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+               aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+               if (IS_ERR(aspace))
+                       iommu_domain_free(iommu);
+       } else {
+               aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
+                       va_start, va_end);
+       }
 
-       aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
        if (IS_ERR(aspace)) {
-               dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+               DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
                        PTR_ERR(aspace));
-               iommu_domain_free(iommu);
                return ERR_CAST(aspace);
        }
 
@@ -871,14 +900,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        gpu->irq = platform_get_irq_byname(pdev, config->irqname);
        if (gpu->irq < 0) {
                ret = gpu->irq;
-               dev_err(drm->dev, "failed to get irq: %d\n", ret);
+               DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }
 
        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
-               dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+               DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }
 
@@ -911,22 +940,25 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                config->va_start, config->va_end);
 
        if (gpu->aspace == NULL)
-               dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+               DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        else if (IS_ERR(gpu->aspace)) {
                ret = PTR_ERR(gpu->aspace);
                goto fail;
        }
 
-       memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+       memptrs = msm_gem_kernel_new(drm,
+               sizeof(struct msm_rbmemptrs) * nr_rings,
                MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
                &memptrs_iova);
 
        if (IS_ERR(memptrs)) {
                ret = PTR_ERR(memptrs);
-               dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+               DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
                goto fail;
        }
 
+       msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
        if (nr_rings > ARRAY_SIZE(gpu->rb)) {
                DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
                        ARRAY_SIZE(gpu->rb));
@@ -939,7 +971,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 
                if (IS_ERR(gpu->rb[i])) {
                        ret = PTR_ERR(gpu->rb[i]);
-                       dev_err(drm->dev,
+                       DRM_DEV_ERROR(drm->dev,
                                "could not create ringbuffer %d: %d\n", i, ret);
                        goto fail;
                }
@@ -958,11 +990,7 @@ fail:
                gpu->rb[i] = NULL;
        }
 
-       if (gpu->memptrs_bo) {
-               msm_gem_put_vaddr(gpu->memptrs_bo);
-               msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
-               drm_gem_object_put_unlocked(gpu->memptrs_bo);
-       }
+       msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 
        platform_set_drvdata(pdev, NULL);
        return ret;
@@ -981,11 +1009,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                gpu->rb[i] = NULL;
        }
 
-       if (gpu->memptrs_bo) {
-               msm_gem_put_vaddr(gpu->memptrs_bo);
-               msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
-               drm_gem_object_put_unlocked(gpu->memptrs_bo);
-       }
+       msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
 
        if (!IS_ERR_OR_NULL(gpu->aspace)) {
                gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
index f82bac0866664bc3e00e1903abb6d428ab5eee9b..efb49bb64191732a0a8ee683e1f9790389295fa9 100644 (file)
@@ -187,6 +187,7 @@ struct msm_gpu_state_bo {
        u64 iova;
        size_t size;
        void *data;
+       bool encoded;
 };
 
 struct msm_gpu_state {
@@ -201,6 +202,7 @@ struct msm_gpu_state {
                u32 wptr;
                void *data;
                int data_size;
+               bool encoded;
        } ring[MSM_GPU_MAX_RINGS];
 
        int nr_registers;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644 (file)
index 0000000..1155118
--- /dev/null
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+           TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+           TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(u32, id)
+                   __field(u32, ringid)
+                   __field(u32, nr_cmds)
+                   __field(u32, nr_bos)
+                   ),
+           TP_fast_assign(
+                   __entry->pid = pid;
+                   __entry->id = id;
+                   __entry->ringid = ringid;
+                   __entry->nr_bos = nr_bos;
+                   __entry->nr_cmds = nr_cmds
+                   ),
+           TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+                   __entry->id, __entry->pid, __entry->ringid,
+                   __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+           TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+           TP_ARGS(submit, ticks),
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(u32, id)
+                   __field(u32, ringid)
+                   __field(u32, seqno)
+                   __field(u64, ticks)
+                   ),
+           TP_fast_assign(
+                   __entry->pid = pid_nr(submit->pid);
+                   __entry->id = submit->ident;
+                   __entry->ringid = submit->ring->id;
+                   __entry->seqno = submit->seqno;
+                   __entry->ticks = ticks;
+                   ),
+           TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+                   __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+                   __entry->ticks)
+);
+
+
+TRACE_EVENT(msm_gpu_submit_retired,
+           TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+                   u64 start, u64 end),
+           TP_ARGS(submit, elapsed, clock, start, end),
+           TP_STRUCT__entry(
+                   __field(pid_t, pid)
+                   __field(u32, id)
+                   __field(u32, ringid)
+                   __field(u32, seqno)
+                   __field(u64, elapsed)
+                   __field(u64, clock)
+                   __field(u64, start_ticks)
+                   __field(u64, end_ticks)
+                   ),
+           TP_fast_assign(
+                   __entry->pid = pid_nr(submit->pid);
+                   __entry->id = submit->ident;
+                   __entry->ringid = submit->ring->id;
+                   __entry->seqno = submit->seqno;
+                   __entry->elapsed = elapsed;
+                   __entry->clock = clock;
+                   __entry->start_ticks = start;
+                   __entry->end_ticks = end;
+                   ),
+           TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+                   __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+                   __entry->elapsed, __entry->clock,
+                   __entry->start_ticks, __entry->end_ticks)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644 (file)
index 0000000..72c074f
--- /dev/null
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644 (file)
index 0000000..27312b5
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+       struct msm_mmu base;
+       struct msm_gpu *gpu;
+       dma_addr_t pt_base;
+       uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
+               int cnt)
+{
+       return 0;
+}
+
+static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
+               int cnt)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+               struct sg_table *sgt, unsigned len, int prot)
+{
+       struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+       unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+       struct scatterlist *sg;
+       unsigned prot_bits = 0;
+       unsigned i, j;
+
+       if (prot & IOMMU_WRITE)
+               prot_bits |= 1;
+       if (prot & IOMMU_READ)
+               prot_bits |= 2;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               dma_addr_t addr = sg->dma_address;
+               for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
+                       gpummu->table[idx] = addr | prot_bits;
+                       addr += GPUMMU_PAGE_SIZE;
+               }
+       }
+
+       /* we can improve by deferring flush for multiple map() */
+       gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+       return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+{
+       struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+       unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+       unsigned i;
+
+       for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+                gpummu->table[idx] = 0;
+
+       gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+               A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+       return 0;
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+       struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+       dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+               DMA_ATTR_FORCE_CONTIGUOUS);
+
+       kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+               .attach = msm_gpummu_attach,
+               .detach = msm_gpummu_detach,
+               .map = msm_gpummu_map,
+               .unmap = msm_gpummu_unmap,
+               .destroy = msm_gpummu_destroy,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+       struct msm_gpummu *gpummu;
+
+       gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+       if (!gpummu)
+               return ERR_PTR(-ENOMEM);
+
+       gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+               GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+       if (!gpummu->table) {
+               kfree(gpummu);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gpummu->gpu = gpu;
+       msm_mmu_init(&gpummu->base, dev, &funcs);
+
+       return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+               dma_addr_t *tran_error)
+{
+       dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+       *pt_base = base;
+       *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
index b23d33622f374b0ce88791914b53cb126899676b..9c313cb129ee75cf2f63515d7160b25a22e9772f 100644 (file)
@@ -71,8 +71,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
        return (ret == len) ? 0 : -EINVAL;
 }
 
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
-               struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
 
index fd88cebb6adb339f489137178bd49e8ec0b0eafc..2b81b43a4bab2ddf9ceee9dfcfe96e446b8530e5 100644 (file)
@@ -67,9 +67,6 @@ struct msm_kms_funcs {
        void (*set_encoder_mode)(struct msm_kms *kms,
                                 struct drm_encoder *encoder,
                                 bool cmd_mode);
-       /* pm suspend/resume hooks */
-       int (*pm_suspend)(struct device *dev);
-       int (*pm_resume)(struct device *dev);
        /* cleanup: */
        void (*destroy)(struct msm_kms *kms);
 #ifdef CONFIG_DEBUG_FS
index aa2c5d4580c820b2e54c16317c121994bd7f37a9..d21b26604d0b8243f711618d96a5dc8de1ad676e 100644 (file)
@@ -25,8 +25,7 @@ struct msm_mmu_funcs {
        void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
        int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
                        unsigned len, int prot);
-       int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-                       unsigned len);
+       int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
        void (*destroy)(struct msm_mmu *mmu);
 };
 
@@ -54,4 +53,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
        mmu->handler = handler;
 }
 
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+               dma_addr_t *tran_error);
+
 #endif /* __MSM_MMU_H__ */
index cca9334584391d97a4026f6ae48bfdb8d7f12ae9..b5672061ae08544280fb8f10a453564da7d68efd 100644 (file)
@@ -345,6 +345,12 @@ static void snapshot_buf(struct msm_rd_state *rd,
        msm_gem_put_vaddr(&obj->base);
 }
 
+static bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+       return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
                const char *fmt, ...)
@@ -386,15 +392,16 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 
        rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
 
-       for (i = 0; rd_full && i < submit->nr_bos; i++)
-               snapshot_buf(rd, submit, i, 0, 0);
+       for (i = 0; i < submit->nr_bos; i++)
+               if (should_dump(submit, i))
+                       snapshot_buf(rd, submit, i, 0, 0);
 
        for (i = 0; i < submit->nr_cmds; i++) {
                uint64_t iova = submit->cmd[i].iova;
                uint32_t szd  = submit->cmd[i].size; /* in dwords */
 
                /* snapshot cmdstream bo's (if we haven't already): */
-               if (!rd_full) {
+               if (!should_dump(submit, i)) {
                        snapshot_buf(rd, submit, submit->cmd[i].idx,
                                        submit->cmd[i].iova, szd * 4);
                }
index 6f5295b3f2f69b0afc47bdae9552cd3356246be5..20a96fe69dcd89b4ed4e5cf3332df81deb709d33 100644 (file)
@@ -36,15 +36,18 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
 
        ring->gpu = gpu;
        ring->id = id;
-       /* Pass NULL for the iova pointer - we will map it later */
+
        ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
-               MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
+               MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
 
        if (IS_ERR(ring->start)) {
                ret = PTR_ERR(ring->start);
                ring->start = 0;
                goto fail;
        }
+
+       msm_gem_object_set_name(ring->bo, "ring%d", id);
+
        ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
        ring->next  = ring->start;
        ring->cur   = ring->start;
@@ -73,10 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 
        msm_fence_context_free(ring->fctx);
 
-       if (ring->bo) {
-               msm_gem_put_iova(ring->bo, ring->gpu->aspace);
-               msm_gem_put_vaddr(ring->bo);
-               drm_gem_object_put_unlocked(ring->bo);
-       }
+       msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+
        kfree(ring);
 }
index cffce094aecb47d0788740220778022d64e5bb27..6434ebb1313657aa25678cd8861b1329d60be4db 100644 (file)
 #define rbmemptr(ring, member)  \
        ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
 
+#define rbmemptr_stats(ring, index, member) \
+       (rbmemptr((ring), stats) + \
+        ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+        offsetof(struct msm_gpu_submit_stats, member))
+
+struct msm_gpu_submit_stats {
+       u64 cpcycles_start;
+       u64 cpcycles_end;
+       u64 alwayson_start;
+       u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
 struct msm_rbmemptrs {
        volatile uint32_t rptr;
        volatile uint32_t fence;
+
+       volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
 };
 
 struct msm_ringbuffer {
index 70dce544984e848b54409a390a41c2a3f9c24d4f..1727d399833cc2fd17b0fcb477b70a573aac30a9 100644 (file)
@@ -67,7 +67,7 @@ nv04_display_create(struct drm_device *dev)
        for (i = 0; i < dcb->entries; i++) {
                struct dcb_output *dcbent = &dcb->entry[i];
 
-               connector = nouveau_connector_create(dev, dcbent->connector);
+               connector = nouveau_connector_create(dev, dcbent);
                if (IS_ERR(connector))
                        continue;
 
index 849b0f45afb866dbb2004dea301e0090aaafd8db..3d074aa311732e777ba183d2afa57bbe3c54d7be 100644 (file)
@@ -7,6 +7,7 @@ nouveau-y += dispnv50/core827d.o
 nouveau-y += dispnv50/core907d.o
 nouveau-y += dispnv50/core917d.o
 nouveau-y += dispnv50/corec37d.o
+nouveau-y += dispnv50/corec57d.o
 
 nouveau-y += dispnv50/dac507d.o
 nouveau-y += dispnv50/dac907d.o
@@ -23,12 +24,14 @@ nouveau-y += dispnv50/head827d.o
 nouveau-y += dispnv50/head907d.o
 nouveau-y += dispnv50/head917d.o
 nouveau-y += dispnv50/headc37d.o
+nouveau-y += dispnv50/headc57d.o
 
 nouveau-y += dispnv50/wimm.o
 nouveau-y += dispnv50/wimmc37b.o
 
 nouveau-y += dispnv50/wndw.o
 nouveau-y += dispnv50/wndwc37e.o
+nouveau-y += dispnv50/wndwc57e.o
 
 nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
index 908feb1fc60f7cadb855e7fa71dc61bd42b63567..a194990d2b0da2c5e576e9672de55b6ff91c7a60 100644 (file)
@@ -54,9 +54,10 @@ struct nv50_head_atom {
                u64 offset:40;
                u8 buffer:1;
                u8 mode:4;
-               u8 size:2;
+               u16 size:11;
                u8 range:2;
                u8 output_mode:2;
+               void (*load)(struct drm_color_lut *, int size, void __iomem *);
        } olut;
 
        struct {
@@ -169,9 +170,11 @@ struct nv50_wndw_atom {
                        u8  buffer:1;
                        u8  enable:2;
                        u8  mode:4;
-                       u8  size:2;
+                       u16 size:11;
                        u8  range:2;
                        u8  output_mode:2;
+                       void (*load)(struct drm_color_lut *, int size,
+                                    void __iomem *);
                } i;
        } xlut;
 
index a562fc94ce5990700b1664af2c295586b7dca1b8..049ce6da321c3fa3e102b1b1773122ea02b9566d 100644 (file)
@@ -80,6 +80,7 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        asyw->xlut.i.mode = 7;
        asyw->xlut.i.enable = 2;
+       asyw->xlut.i.load = head907d_olut_load;
 }
 
 const struct nv50_wndw_func
index f3c49adb1bdb9ab656303739d108f7b13e90ec26..c25e0ebe3c92e92a724068fec1a818b171322abf 100644 (file)
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
                int version;
                int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
        } cores[] = {
+               { TU104_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
                { GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
                { GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
index 8470df9dd13de463bebe69cc762173b0f8586dd1..df8336b593f7c4adc02878a171f8abf8d927e721 100644 (file)
@@ -46,5 +46,9 @@ extern const struct nv50_outp_func sor907d;
 int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
 
 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+void corec37d_update(struct nv50_core *, u32 *, bool);
 extern const struct nv50_outp_func sorc37d;
+
+int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
 #endif
index b5c17c94891874d9b9841aab7bc584c826bcddf1..7860774b65bc0322aab7e0d69450738ff7c720c4 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <nouveau_bo.h>
 
-static void
+void
 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
 {
        u32 *push;
@@ -71,7 +71,7 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
        nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
 }
 
-void
+static void
 corec37d_init(struct nv50_core *core)
 {
        const u32 windows = 8; /*XXX*/
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
new file mode 100644 (file)
index 0000000..b606d68
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static void
+corec57d_init(struct nv50_core *core)
+{
+       const u32 windows = 8; /*XXX*/
+       u32 *push, i;
+       if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+               evo_mthd(push, 0x0208, 1);
+               evo_data(push, core->chan.sync.handle);
+               for (i = 0; i < windows; i++) {
+                       evo_mthd(push, 0x1000 + (i * 0x080), 3);
+                       evo_data(push, i >> 1);
+                       evo_data(push, 0x0000000f);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x1010 + (i * 0x080), 1);
+                       evo_data(push, 0x00117fff);
+               }
+               evo_mthd(push, 0x0200, 1);
+               evo_data(push, 0x00000001);
+               evo_kick(push, &core->chan);
+       }
+}
+
+static const struct nv50_core_func
+corec57d = {
+       .init = corec57d_init,
+       .ntfy_init = corec37d_ntfy_init,
+       .ntfy_wait_done = corec37d_ntfy_wait_done,
+       .update = corec37d_update,
+       .head = &headc57d,
+       .sor = &sorc37d,
+};
+
+int
+corec57d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+       return core507d_new_(&corec57d, drm, oclass, pcore);
+}
index f592087338c465267548dee6f9309d33bd052ec4..cb6e4d2b1b4508fcf9e74048285a2149fe700f63 100644 (file)
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
                int version;
                int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
        } curses[] = {
+               { TU104_DISP_CURSOR, 0, cursc37a_new },
                { GV100_DISP_CURSOR, 0, cursc37a_new },
                { GK104_DISP_CURSOR, 0, curs907a_new },
                { GF110_DISP_CURSOR, 0, curs907a_new },
index 6cbbae3f438bd0e44cbc01406687ed82170b7372..00add3ba051fb5644b4cd0640a8e6ce5e4ddda8b 100644 (file)
@@ -1255,8 +1255,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
 static void
 nv50_mstm_init(struct nv50_mstm *mstm)
 {
-       if (mstm && mstm->mgr.mst_state)
-               drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+       int ret;
+
+       if (!mstm || !mstm->mgr.mst_state)
+               return;
+
+       ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+       if (ret == -1) {
+               drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+               drm_kms_helper_hotplug_event(mstm->mgr.dev);
+       }
 }
 
 static void
@@ -2293,7 +2301,7 @@ nv50_display_create(struct drm_device *dev)
 
        /* create encoder/connector objects based on VBIOS DCB table */
        for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
-               connector = nouveau_connector_create(dev, dcbe->connector);
+               connector = nouveau_connector_create(dev, dcbe);
                if (IS_ERR(connector))
                        continue;
 
index e48c5eb35b49883d015d3688a58888d9fa0b80c5..2216c58620c2de8253b57c17446a24909eca9d3e 100644 (file)
@@ -45,6 +45,8 @@ struct nv50_disp_interlock {
 
 void corec37d_ntfy_init(struct nouveau_bo *, u32);
 
+void head907d_olut_load(struct drm_color_lut *, int size, void __iomem *);
+
 struct nv50_chan {
        struct nvif_object user;
        struct nvif_device *device;
index 4f57e53797968e845175bb6b0689aa73e9d2bc29..ac97ebce5b35139d15cfddf08583f3b0719800ef 100644 (file)
@@ -50,9 +50,9 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        if (asyh->set.core   ) head->func->core_set(head, asyh);
        if (asyh->set.olut   ) {
                asyh->olut.offset = nv50_lut_load(&head->olut,
-                                                 asyh->olut.mode <= 1,
                                                  asyh->olut.buffer,
-                                                 asyh->state.gamma_lut);
+                                                 asyh->state.gamma_lut,
+                                                 asyh->olut.load);
                head->func->olut_set(head, asyh);
        }
        if (asyh->set.curs   ) head->func->curs_set(head, asyh);
@@ -210,7 +210,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
                }
        }
 
-       if (!olut) {
+       if (!olut && !head->func->olut_identity) {
                asyh->olut.handle = 0;
                return 0;
        }
index 37b3248c6dae4d5a9b736553518eef30534865cf..d1c002f534d4edaffb48bff227a0b1b81d90d986 100644 (file)
@@ -21,6 +21,7 @@ struct nv50_head_func {
        void (*view)(struct nv50_head *, struct nv50_head_atom *);
        void (*mode)(struct nv50_head *, struct nv50_head_atom *);
        void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+       bool olut_identity;
        void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
        void (*olut_clr)(struct nv50_head *);
        void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -75,4 +76,14 @@ int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
 
 extern const struct nv50_head_func headc37d;
+void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_clr(struct nv50_head *);
+int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+                        struct nv50_head_atom *);
+void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_curs_clr(struct nv50_head *);
+void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc57d;
 #endif
index 51bc5996fd37a1a85e4e2189995f8d8bdb10f33e..7561be5ca707c5d2447ecb027349e60a004fc6fd 100644 (file)
@@ -254,6 +254,23 @@ head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
 }
 
+static void
+head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+       for (; size--; in++, mem += 8) {
+               writew(drm_color_lut_extract(in->  red, 11) << 3, mem + 0);
+               writew(drm_color_lut_extract(in->green, 11) << 3, mem + 2);
+               writew(drm_color_lut_extract(in-> blue, 11) << 3, mem + 4);
+       }
+
+       /* INTERPOLATE modes require a "next" entry to interpolate with,
+        * so we replicate the last entry to deal with this for now.
+        */
+       writew(readw(mem - 8), mem + 0);
+       writew(readw(mem - 6), mem + 2);
+       writew(readw(mem - 4), mem + 4);
+}
+
 void
 head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
@@ -261,6 +278,8 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
                asyh->olut.mode = 0;
        else
                asyh->olut.mode = 1;
+
+       asyh->olut.load = head507d_olut_load;
 }
 
 void
index 633907163eb1f1289b08da747c144649eb17b6f8..c2d09dd97b1ff1a01c91a6593f7b8f4d18d12ea8 100644 (file)
@@ -213,10 +213,28 @@ head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
 }
 
+void
+head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+       for (; size--; in++, mem += 8) {
+               writew(drm_color_lut_extract(in->  red, 14) + 0x6000, mem + 0);
+               writew(drm_color_lut_extract(in->green, 14) + 0x6000, mem + 2);
+               writew(drm_color_lut_extract(in-> blue, 14) + 0x6000, mem + 4);
+       }
+
+       /* INTERPOLATE modes require a "next" entry to interpolate with,
+        * so we replicate the last entry to deal with this for now.
+        */
+       writew(readw(mem - 8), mem + 0);
+       writew(readw(mem - 6), mem + 2);
+       writew(readw(mem - 4), mem + 4);
+}
+
 void
 head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
        asyh->olut.mode = 7;
+       asyh->olut.load = head907d_olut_load;
 }
 
 void
index 989c14083066ddd4fbed6069f3e9673fab8de535..ef6a99d95a9cd009cfe9ad637dc0646e9ddbadad 100644 (file)
@@ -65,7 +65,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
 }
 
-static void
+void
 headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -79,7 +79,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
 }
 
-static void
+void
 headc37d_curs_clr(struct nv50_head *head)
 {
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -93,7 +93,7 @@ headc37d_curs_clr(struct nv50_head *head)
        }
 }
 
-static void
+void
 headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -112,7 +112,7 @@ headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
 }
 
-static int
+int
 headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
                     struct nv50_head_atom *asyh)
 {
@@ -155,6 +155,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
        asyh->olut.size = 0;
        asyh->olut.range = 0;
        asyh->olut.output_mode = 1;
+       asyh->olut.load = head907d_olut_load;
 }
 
 static void
@@ -181,7 +182,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        }
 }
 
-static void
+void
 headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
        struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
new file mode 100644 (file)
index 0000000..32a7f9e
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+static void
+headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       u32 *push;
+       if ((push = evo_wait(core, 2))) {
+               /*XXX: This is a dirty hack until OR depth handling is
+                *     improved later for deep colour etc.
+                */
+               switch (asyh->or.depth) {
+               case 6: asyh->or.depth = 5; break;
+               case 5: asyh->or.depth = 4; break;
+               case 2: asyh->or.depth = 1; break;
+               case 0: asyh->or.depth = 4; break;
+               default:
+                       WARN_ON(1);
+                       break;
+               }
+
+               evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
+               evo_data(push, 0xfc000001 |
+                              asyh->or.depth << 4 |
+                              asyh->or.nvsync << 3 |
+                              asyh->or.nhsync << 2);
+               evo_kick(push, core);
+       }
+}
+
+static void
+headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       u32 *push;
+       if ((push = evo_wait(core, 2))) {
+               evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
+#if 0
+               evo_data(push, 0x80000000 |
+                              asyh->procamp.sat.sin << 16 |
+                              asyh->procamp.sat.cos << 4);
+#else
+               evo_data(push, 0);
+#endif
+               evo_kick(push, core);
+       }
+}
+
+void
+headc57d_olut_clr(struct nv50_head *head)
+{
+       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       u32 *push;
+       if ((push = evo_wait(core, 2))) {
+               evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
+               evo_data(push, 0x00000000);
+               evo_kick(push, core);
+       }
+}
+
+void
+headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       u32 *push;
+       if ((push = evo_wait(core, 4))) {
+               evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
+               evo_data(push, asyh->olut.size << 8 |
+                              asyh->olut.mode << 2 |
+                              asyh->olut.output_mode);
+               evo_data(push, 0xffffffff); /* FP_NORM_SCALE. */
+               evo_data(push, asyh->olut.handle);
+               evo_data(push, asyh->olut.offset >> 8);
+               evo_kick(push, core);
+       }
+}
+
+static void
+headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+       memset_io(mem, 0x00, 0x20); /* VSS header. */
+       mem += 0x20;
+
+       while (size--) {
+               u16 r = drm_color_lut_extract(in->  red + 0, 16);
+               u16 g = drm_color_lut_extract(in->green + 0, 16);
+               u16 b = drm_color_lut_extract(in-> blue + 0, 16);
+               u16 ri = 0, gi = 0, bi = 0, i;
+
+               if (in++, size) {
+                       ri = (drm_color_lut_extract(in->  red, 16) - r) / 4;
+                       gi = (drm_color_lut_extract(in->green, 16) - g) / 4;
+                       bi = (drm_color_lut_extract(in-> blue, 16) - b) / 4;
+               }
+
+               for (i = 0; i < 4; i++, mem += 8) {
+                       writew(r + ri * i, mem + 0);
+                       writew(g + gi * i, mem + 2);
+                       writew(b + bi * i, mem + 4);
+               }
+       }
+
+       /* INTERPOLATE modes require a "next" entry to interpolate with,
+        * so we replicate the last entry to deal with this for now.
+        */
+       writew(readw(mem - 8), mem + 0);
+       writew(readw(mem - 6), mem + 2);
+       writew(readw(mem - 4), mem + 4);
+}
+
+static void
+headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+       memset_io(mem, 0x00, 0x20); /* VSS header. */
+       mem += 0x20;
+
+       for (; size--; in++, mem += 0x08) {
+               writew(drm_color_lut_extract(in->  red, 16), mem + 0);
+               writew(drm_color_lut_extract(in->green, 16), mem + 2);
+               writew(drm_color_lut_extract(in-> blue, 16), mem + 4);
+       }
+
+       /* INTERPOLATE modes require a "next" entry to interpolate with,
+        * so we replicate the last entry to deal with this for now.
+        */
+       writew(readw(mem - 8), mem + 0);
+       writew(readw(mem - 6), mem + 2);
+       writew(readw(mem - 4), mem + 4);
+}
+
+void
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       asyh->olut.mode = 2; /* DIRECT10 */
+       asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
+       asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
+       if (asyh->state.gamma_lut &&
+           asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+               asyh->olut.load = headc57d_olut_load_8;
+       else
+               asyh->olut.load = headc57d_olut_load;
+}
+
+static void
+headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+       struct nv50_head_mode *m = &asyh->mode;
+       u32 *push;
+       if ((push = evo_wait(core, 12))) {
+               evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
+               evo_data(push, (m->v.active  << 16) | m->h.active );
+               evo_data(push, (m->v.synce   << 16) | m->h.synce  );
+               evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
+               evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
+               evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+               evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+               evo_data(push, m->clock * 1000);
+               evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
+               evo_data(push, m->clock * 1000);
+               /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+               evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
+               evo_data(push, 0x00001014);
+               evo_kick(push, core);
+       }
+}
+
+const struct nv50_head_func
+headc57d = {
+       .view = headc37d_view,
+       .mode = headc57d_mode,
+       .olut = headc57d_olut,
+       .olut_identity = true,
+       .olut_set = headc57d_olut_set,
+       .olut_clr = headc57d_olut_clr,
+       .curs_layout = head917d_curs_layout,
+       .curs_format = headc37d_curs_format,
+       .curs_set = headc37d_curs_set,
+       .curs_clr = headc37d_curs_clr,
+       .dither = headc37d_dither,
+       .procamp = headc57d_procamp,
+       .or = headc57d_or,
+};
index a6b96ae2a22f8f9bf01ee647738f44093671ca0b..994def4fd51a8da9deb2f99374ecfe3a88bc08fc 100644 (file)
 #include <nvif/class.h>
 
 u32
-nv50_lut_load(struct nv50_lut *lut, bool legacy, int buffer,
-             struct drm_property_blob *blob)
+nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
+             void (*load)(struct drm_color_lut *, int, void __iomem *))
 {
-       struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+       struct drm_color_lut *in = blob ? blob->data : NULL;
        void __iomem *mem = lut->mem[buffer].object.map.ptr;
-       const int size = blob->length / sizeof(*in);
-       int bits, shift, i;
-       u16 zero, r, g, b;
-       u32 addr = lut->mem[buffer].addr;
-
-       /* This can't happen.. But it shuts the compiler up. */
-       if (WARN_ON(size != 256))
-               return 0;
+       const u32 addr = lut->mem[buffer].addr;
+       int i;
 
-       if (legacy) {
-               bits = 11;
-               shift = 3;
-               zero = 0x0000;
+       if (!in) {
+               in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);
+               if (!WARN_ON(!in)) {
+                       for (i = 0; i < 1024; i++) {
+                               in[i].red   =
+                               in[i].green =
+                               in[i].blue  = (i << 16) >> 10;
+                       }
+                       load(in, 1024, mem);
+                       kvfree(in);
+               }
        } else {
-               bits = 14;
-               shift = 0;
-               zero = 0x6000;
-       }
-
-       for (i = 0; i < size; i++) {
-               r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
-               g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
-               b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
-               writew(r, mem + (i * 0x08) + 0);
-               writew(g, mem + (i * 0x08) + 2);
-               writew(b, mem + (i * 0x08) + 4);
+               load(in, blob->length / sizeof(*in), mem);
        }
 
-       /* INTERPOLATE modes require a "next" entry to interpolate with,
-        * so we replicate the last entry to deal with this for now.
-        */
-       writew(r, mem + (i * 0x08) + 0);
-       writew(g, mem + (i * 0x08) + 2);
-       writew(b, mem + (i * 0x08) + 4);
        return addr;
 }
 
index 6d7b8352e4cb245b073860f798a3d93b02d0f72e..b3b9040cfe9a9c9f1c4701ebfab2a5ead95c7b6a 100644 (file)
@@ -2,6 +2,7 @@
 #define __NV50_KMS_LUT_H__
 #include <nvif/mem.h>
 struct drm_property_blob;
+struct drm_color_lut;
 struct nv50_disp;
 
 struct nv50_lut {
@@ -10,6 +11,6 @@ struct nv50_lut {
 
 int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
 void nv50_lut_fini(struct nv50_lut *);
-u32 nv50_lut_load(struct nv50_lut *, bool legacy, int buffer,
-                 struct drm_property_blob *);
+u32 nv50_lut_load(struct nv50_lut *, int buffer, struct drm_property_blob *,
+                 void (*)(struct drm_color_lut *, int size, void __iomem *));
 #endif
index fc36e06964077c000bbcb0db683e70456c274665..bc9eeaf212ae0b9ca63c478ff3cc303a4ef7f616 100644 (file)
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
                int version;
                int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
        } wimms[] = {
+               { TU104_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                { GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                {}
        };
index 2187922e8dc28d4d11df28bc33aec3f91281791c..ba9eea2ff16bb2dbfac5606c3f1a566777d7abbc 100644 (file)
@@ -139,10 +139,8 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
        if (asyw->set.xlut ) {
                if (asyw->ilut) {
                        asyw->xlut.i.offset =
-                               nv50_lut_load(&wndw->ilut,
-                                             asyw->xlut.i.mode <= 1,
-                                             asyw->xlut.i.buffer,
-                                             asyw->ilut);
+                               nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
+                                             asyw->ilut, asyw->xlut.i.load);
                }
                wndw->func->xlut_set(wndw, asyw);
        }
@@ -322,6 +320,11 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
                asyh->wndw.olut &= ~BIT(wndw->id);
        }
 
+       if (!ilut && wndw->func->ilut_identity) {
+               static struct drm_property_blob dummy = {};
+               ilut = &dummy;
+       }
+
        /* Recalculate LUT state. */
        memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
        if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
@@ -623,6 +626,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
                int (*new)(struct nouveau_drm *, enum drm_plane_type,
                           int, s32, struct nv50_wndw **);
        } wndws[] = {
+               { TU104_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
                { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
                {}
        };
index b0b6428034b07f32659f254c32b410b192dc77de..03f3d8dc235a70cb5d15670c687b427c5c665319 100644 (file)
@@ -65,6 +65,7 @@ struct nv50_wndw_func {
        int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
                               struct nvif_device *);
        void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+       bool ilut_identity;
        bool olut_core;
        void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
        void (*xlut_clr)(struct nv50_wndw *);
@@ -90,6 +91,23 @@ extern const struct nv50_wimm_func curs507a;
 
 int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
                 struct nv50_wndw **);
+int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
+                 enum drm_plane_type type, int index, s32 oclass, u32 heads,
+                 struct nv50_wndw **);
+int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+                    struct nv50_head_atom *);
+void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+                     struct nv50_head_atom *);
+void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_sema_clr(struct nv50_wndw *);
+void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_ntfy_clr(struct nv50_wndw *);
+void wndwc37e_image_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_image_clr(struct nv50_wndw *);
+void wndwc37e_update(struct nv50_wndw *, u32 *);
+
+int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+                struct nv50_wndw **);
 
 int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
                  struct nv50_wndw **);
index 44afb0f069a55e362b969604161da7508b55c07a..e52a85c83f7a81358981536b419db9e5a475d82e 100644 (file)
@@ -61,9 +61,10 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        asyw->xlut.i.size = 0;
        asyw->xlut.i.range = 0;
        asyw->xlut.i.output_mode = 1;
+       asyw->xlut.i.load = head907d_olut_load;
 }
 
-static void
+void
 wndwc37e_image_clr(struct nv50_wndw *wndw)
 {
        u32 *push;
@@ -76,7 +77,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
        }
 }
 
-static void
+void
 wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        u32 *push;
@@ -117,7 +118,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        evo_kick(push, &wndw->wndw);
 }
 
-static void
+void
 wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
 {
        u32 *push;
@@ -128,7 +129,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
        }
 }
 
-static void
+void
 wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        u32 *push;
@@ -140,7 +141,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        }
 }
 
-static void
+void
 wndwc37e_sema_clr(struct nv50_wndw *wndw)
 {
        u32 *push;
@@ -151,7 +152,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
        }
 }
 
-static void
+void
 wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        u32 *push;
@@ -165,7 +166,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        }
 }
 
-static void
+void
 wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
 {
        u32 *push;
@@ -183,13 +184,13 @@ wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
        }
 }
 
-static void
+void
 wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                 struct nv50_head_atom *asyh)
 {
 }
 
-static int
+int
 wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
                 struct nv50_head_atom *asyh)
 {
@@ -236,7 +237,7 @@ wndwc37e = {
        .update = wndwc37e_update,
 };
 
-static int
+int
 wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
              enum drm_plane_type type, int index, s32 oclass, u32 heads,
              struct nv50_wndw **pwndw)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
new file mode 100644 (file)
index 0000000..ba89f1a
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/clc37e.h>
+
+static void
+wndwc57e_ilut_clr(struct nv50_wndw *wndw)
+{
+       u32 *push;
+       if ((push = evo_wait(&wndw->wndw, 2))) {
+               evo_mthd(push, 0x0444, 1);
+               evo_data(push, 0x00000000);
+               evo_kick(push, &wndw->wndw);
+       }
+}
+
+static void
+wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+       u32 *push;
+       if ((push = evo_wait(&wndw->wndw, 4))) {
+               evo_mthd(push, 0x0440, 3);
+               evo_data(push, asyw->xlut.i.size << 8 |
+                              asyw->xlut.i.mode << 2 |
+                              asyw->xlut.i.output_mode);
+               evo_data(push, asyw->xlut.handle);
+               evo_data(push, asyw->xlut.i.offset >> 8);
+               evo_kick(push, &wndw->wndw);
+       }
+}
+
+static u16
+fixedU0_16_FP16(u16 fixed)
+{
+        int sign = 0, exp = 0, man = 0;
+        if (fixed) {
+                while (--exp && !(fixed & 0x8000))
+                        fixed <<= 1;
+                man = ((fixed << 1) & 0xffc0) >> 6;
+                exp += 15;
+        }
+        return (sign << 15) | (exp << 10) | man;
+}
+
+static void
+wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+       memset_io(mem, 0x00, 0x20); /* VSS header. */
+       mem += 0x20;
+
+       for (; size--; in++, mem += 0x08) {
+               u16 r = fixedU0_16_FP16(drm_color_lut_extract(in->  red, 16));
+               u16 g = fixedU0_16_FP16(drm_color_lut_extract(in->green, 16));
+               u16 b = fixedU0_16_FP16(drm_color_lut_extract(in-> blue, 16));
+               writew(r, mem + 0);
+               writew(g, mem + 2);
+               writew(b, mem + 4);
+       }
+
+       /* INTERPOLATE modes require a "next" entry to interpolate with,
+        * so we replicate the last entry to deal with this for now.
+        */
+       writew(readw(mem - 8), mem + 0);
+       writew(readw(mem - 6), mem + 2);
+       writew(readw(mem - 4), mem + 4);
+}
+
+static void
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+       u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+       if (size == 256) {
+               asyw->xlut.i.mode = 1; /* DIRECT8. */
+       } else {
+               asyw->xlut.i.mode = 2; /* DIRECT10. */
+               size = 1024;
+       }
+       asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
+       asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
+       asyw->xlut.i.load = wndwc57e_ilut_load;
+}
+
+static const struct nv50_wndw_func
+wndwc57e = {
+       .acquire = wndwc37e_acquire,
+       .release = wndwc37e_release,
+       .sema_set = wndwc37e_sema_set,
+       .sema_clr = wndwc37e_sema_clr,
+       .ntfy_set = wndwc37e_ntfy_set,
+       .ntfy_clr = wndwc37e_ntfy_clr,
+       .ntfy_reset = corec37d_ntfy_init,
+       .ntfy_wait_begun = base507c_ntfy_wait_begun,
+       .ilut = wndwc57e_ilut,
+       .ilut_identity = true,
+       .xlut_set = wndwc57e_ilut_set,
+       .xlut_clr = wndwc57e_ilut_clr,
+       .image_set = wndwc37e_image_set,
+       .image_clr = wndwc37e_image_clr,
+       .update = wndwc37e_update,
+};
+
+int
+wndwc57e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+            s32 oclass, struct nv50_wndw **pwndw)
+{
+       return wndwc37e_new_(&wndwc57e, drm, type, index, oclass,
+                            BIT(index >> 1), pwndw);
+}
index 4f5233107f5f83d4ea2abbf8a639278f6588e577..4cbed03293676742c5e5b2b72f50537df63a0e1e 100644 (file)
@@ -32,6 +32,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_MAXWELL                                          0x09
 #define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
 #define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
+#define NV_DEVICE_INFO_V0_TURING                                           0x0c
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
index fbfcffc5feb2b4440572a317ed2ae41aff4bd2fe..81401eb970ea68798356a667e7249cfea9d34fda 100644 (file)
@@ -4,12 +4,13 @@
 
 struct kepler_channel_gpfifo_a_v0 {
        __u8  version;
-       __u8  pad01[1];
+       __u8  priv;
        __u16 chid;
        __u32 ilength;
        __u64 ioffset;
        __u64 runlist;
        __u64 vmm;
+       __u64 inst;
 };
 
 #define NVA06F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
index 6db56bd7d67eb1b6d745ff26d09e57dfe2a81ebc..1d82cbf70cf448784e388e3e2909f5e33d568daa 100644 (file)
@@ -68,7 +68,8 @@
 #define KEPLER_CHANNEL_GPFIFO_B                       /* cla06f.h */ 0x0000a16f
 #define MAXWELL_CHANNEL_GPFIFO_A                      /* cla06f.h */ 0x0000b06f
 #define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
-#define VOLTA_CHANNEL_GPFIFO_A                        /* cla06f.h */ 0x0000c36f
+#define VOLTA_CHANNEL_GPFIFO_A                        /* clc36f.h */ 0x0000c36f
+#define TURING_CHANNEL_GPFIFO_A                       /* clc36f.h */ 0x0000c46f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
@@ -83,6 +84,7 @@
 #define GP100_DISP                                    /* cl5070.h */ 0x00009770
 #define GP102_DISP                                    /* cl5070.h */ 0x00009870
 #define GV100_DISP                                    /* cl5070.h */ 0x0000c370
+#define TU104_DISP                                    /* cl5070.h */ 0x0000c570
 
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
@@ -95,6 +97,7 @@
 #define GF110_DISP_CURSOR                             /* cl507a.h */ 0x0000907a
 #define GK104_DISP_CURSOR                             /* cl507a.h */ 0x0000917a
 #define GV100_DISP_CURSOR                             /* cl507a.h */ 0x0000c37a
+#define TU104_DISP_CURSOR                             /* cl507a.h */ 0x0000c57a
 
 #define NV50_DISP_OVERLAY                             /* cl507b.h */ 0x0000507b
 #define G82_DISP_OVERLAY                              /* cl507b.h */ 0x0000827b
 #define GK104_DISP_OVERLAY                            /* cl507b.h */ 0x0000917b
 
 #define GV100_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c37b
+#define TU104_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c57b
 
 #define NV50_DISP_BASE_CHANNEL_DMA                    /* cl507c.h */ 0x0000507c
 #define G82_DISP_BASE_CHANNEL_DMA                     /* cl507c.h */ 0x0000827c
 #define GP100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000977d
 #define GP102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
 #define GV100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c37d
+#define TU104_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c57d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
 #define GK104_DISP_OVERLAY_CONTROL_DMA                /* cl507e.h */ 0x0000917e
 
 #define GV100_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c37e
+#define TU104_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c57e
 
 #define NV50_TESLA                                                   0x00005097
 #define G82_TESLA                                                    0x00008297
 #define PASCAL_DMA_COPY_A                                            0x0000c0b5
 #define PASCAL_DMA_COPY_B                                            0x0000c1b5
 #define VOLTA_DMA_COPY_A                                             0x0000c3b5
+#define TURING_DMA_COPY_A                                            0x0000c5b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc36f.h b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
new file mode 100644 (file)
index 0000000..6b14d7e
--- /dev/null
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NVIF_CLC36F_H__
+#define __NVIF_CLC36F_H__
+
+struct volta_channel_gpfifo_a_v0 {
+       __u8  version;
+       __u8  priv;
+       __u16 chid;
+       __u32 ilength;
+       __u64 ioffset;
+       __u64 runlist;
+       __u64 vmm;
+       __u64 inst;
+       __u32 token;
+};
+
+#define NVC36F_V0_NTFY_NON_STALL_INTERRUPT                                 0x00
+#define NVC36F_V0_NTFY_KILLED                                              0x01
+#endif
index d83d834b745217940a9125c8ce99930aad6ac79a..72e4dc1f02360ff6856a671048328f325718f994 100644 (file)
@@ -61,7 +61,11 @@ enum nvkm_devidx {
        NVKM_ENGINE_NVENC2,
        NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
 
-       NVKM_ENGINE_NVDEC,
+       NVKM_ENGINE_NVDEC0,
+       NVKM_ENGINE_NVDEC1,
+       NVKM_ENGINE_NVDEC2,
+       NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
+
        NVKM_ENGINE_PM,
        NVKM_ENGINE_SEC,
        NVKM_ENGINE_SEC2,
@@ -114,6 +118,7 @@ struct nvkm_device {
                GM100    = 0x110,
                GP100    = 0x130,
                GV100    = 0x140,
+               TU100    = 0x160,
        } card_type;
        u32 chipset;
        u8  chiprev;
@@ -163,7 +168,7 @@ struct nvkm_device {
        struct nvkm_engine *msppp;
        struct nvkm_engine *msvld;
        struct nvkm_engine *nvenc[3];
-       struct nvkm_nvdec *nvdec;
+       struct nvkm_nvdec *nvdec[3];
        struct nvkm_pm *pm;
        struct nvkm_engine *sec;
        struct nvkm_sec2 *sec2;
@@ -235,7 +240,7 @@ struct nvkm_device_chip {
        int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
-       int (*nvdec   )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
+       int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
        int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
        int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*sec2    )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
index 05f505de0075f5256b39814292789ae56b8d044f..f34c80310861ed9446342e304ec385ed775a610d 100644 (file)
@@ -29,6 +29,7 @@ struct nvkm_memory_func {
        void *(*dtor)(struct nvkm_memory *);
        enum nvkm_memory_target (*target)(struct nvkm_memory *);
        u8 (*page)(struct nvkm_memory *);
+       u64 (*bar2)(struct nvkm_memory *);
        u64 (*addr)(struct nvkm_memory *);
        u64 (*size)(struct nvkm_memory *);
        void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
@@ -56,6 +57,7 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
 
 #define nvkm_memory_target(p) (p)->func->target(p)
 #define nvkm_memory_page(p) (p)->func->page(p)
+#define nvkm_memory_bar2(p) (p)->func->bar2(p)
 #define nvkm_memory_addr(p) (p)->func->addr(p)
 #define nvkm_memory_size(p) (p)->func->size(p)
 #define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
index fc295e1faa198282118739759eb47c33d4866542..86abe76023c230cdb212c41efd246b0993052fc5 100644 (file)
@@ -11,4 +11,5 @@ int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int tu104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
index ef7dc0844d26e4b6f6268cae18224026d1237318..5ca86e178bb98f5eb857d4cd2624efa533a2457c 100644 (file)
@@ -36,4 +36,5 @@ int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int tu104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
index 7e39fbed2519cb104658c1e8195f26a47788b453..3b2b685778eb15d57310fabd0a690b6a27cd0610 100644 (file)
@@ -74,4 +74,5 @@ int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int tu104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
index f6bd94c7e0f75fa71586a41270a1865c6c702a11..fd9d713b611cf4a133985c6311ac53e75fcce86b 100644 (file)
@@ -16,8 +16,10 @@ struct nvkm_bar {
 };
 
 struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar1_reset(struct nvkm_device *);
 void nvkm_bar_bar2_init(struct nvkm_device *);
 void nvkm_bar_bar2_fini(struct nvkm_device *);
+void nvkm_bar_bar2_reset(struct nvkm_device *);
 struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
 void nvkm_bar_flush(struct nvkm_bar *);
 
@@ -27,4 +29,5 @@ int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int tu104_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
 #endif
index 703a5b524b96c7bd445f35a79bf402f60e7f6d4b..425ccc47e3b778740b2cf8101a6b8c71402fa4ec 100644 (file)
@@ -12,11 +12,14 @@ u32 nvbios_M0203Tp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
                   struct nvbios_M0203T *);
 
 struct nvbios_M0203E {
-#define M0203E_TYPE_DDR2  0x0
-#define M0203E_TYPE_DDR3  0x1
-#define M0203E_TYPE_GDDR3 0x2
-#define M0203E_TYPE_GDDR5 0x3
-#define M0203E_TYPE_SKIP  0xf
+#define M0203E_TYPE_DDR2   0x0
+#define M0203E_TYPE_DDR3   0x1
+#define M0203E_TYPE_GDDR3  0x2
+#define M0203E_TYPE_GDDR5  0x3
+#define M0203E_TYPE_HBM2   0x6
+#define M0203E_TYPE_GDDR5X 0x8
+#define M0203E_TYPE_GDDR6  0x9
+#define M0203E_TYPE_SKIP   0xf
        u8 type;
        u8 strap;
        u8 group;
index ed9e0a6a001190fed601b6af8255eed621f0307a..8463b421d34542796c605c4b77beccfaf7094bd3 100644 (file)
@@ -20,6 +20,7 @@ enum dcb_connector_type {
        DCB_CONNECTOR_DMS59_DP0 = 0x64,
        DCB_CONNECTOR_DMS59_DP1 = 0x65,
        DCB_CONNECTOR_WFD       = 0x70,
+       DCB_CONNECTOR_USB_C = 0x71,
        DCB_CONNECTOR_NONE = 0xff
 };
 
index 486e7635c29d7aa12b7ddd6f3de0b443c03f8c64..1b71812a790bcd1ec8068e3233a014024a265fba 100644 (file)
@@ -31,4 +31,5 @@ int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int tu104_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
index 5a77498fe6a09a304c1d63cf297cc3977c9bf5ac..127f48066026eaa55dea0c3d7cb666b9767ed17a 100644 (file)
@@ -30,4 +30,5 @@ struct nvkm_fault_data {
 
 int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int tu104_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
 #endif
index 96ccc624ee8144ded1208bf276b7477598976470..27298f8b7ead09c050a9761a8bfd0637b80a8587 100644 (file)
@@ -105,7 +105,10 @@ enum nvkm_ram_type {
        NVKM_RAM_TYPE_GDDR2,
        NVKM_RAM_TYPE_GDDR3,
        NVKM_RAM_TYPE_GDDR4,
-       NVKM_RAM_TYPE_GDDR5
+       NVKM_RAM_TYPE_GDDR5,
+       NVKM_RAM_TYPE_GDDR5X,
+       NVKM_RAM_TYPE_GDDR6,
+       NVKM_RAM_TYPE_HBM2,
 };
 
 struct nvkm_ram {
index 61c93c86e2e2473113ea1ed7f0b49b4c8b1a23f7..b66dedd8abb6aae949058a2ccc57c86ea5ea4f65 100644 (file)
@@ -31,4 +31,5 @@ int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int tu104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
index 688595545e21ed7b2700953f9a698a06f40b81de..0a0e064f22e5962b06405a05ea42e9d6f47de18a 100644 (file)
@@ -130,4 +130,5 @@ int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int tu104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
 #endif
index e9b0746826ca66f9ac1765bbf8e757e1ee57a208..3693ebf371b647d0e7ba1044861f5417da6e332f 100644 (file)
@@ -28,6 +28,18 @@ struct nvkm_timer {
 u64 nvkm_timer_read(struct nvkm_timer *);
 void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
 
+struct nvkm_timer_wait {
+       struct nvkm_timer *tmr;
+       u64 limit;
+       u64 time0;
+       u64 time1;
+       int reads;
+};
+
+void nvkm_timer_wait_init(struct nvkm_device *, u64 nsec,
+                         struct nvkm_timer_wait *);
+s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
+
 /* Delay based on GPU time (ie. PTIMER).
  *
  * Will return -ETIMEDOUT unless the loop was terminated with 'break',
@@ -38,21 +50,17 @@ void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
  */
 #define NVKM_DELAY _warn = false;
 #define nvkm_nsec(d,n,cond...) ({                                              \
-       struct nvkm_device *_device = (d);                                     \
-       struct nvkm_timer *_tmr = _device->timer;                              \
-       u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr);                      \
-       s64 _taken = 0;                                                        \
+       struct nvkm_timer_wait _wait;                                          \
        bool _warn = true;                                                     \
+       s64 _taken = 0;                                                        \
                                                                                \
+       nvkm_timer_wait_init((d), (n), &_wait);                                \
        do {                                                                   \
                cond                                                           \
-       } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs);    \
+       } while ((_taken = nvkm_timer_wait_test(&_wait)) >= 0);                \
                                                                                \
-       if (_taken >= _nsecs) {                                                \
-               if (_warn)                                                     \
-                       dev_WARN(_device->dev, "timeout\n");                   \
-               _taken = -ETIMEDOUT;                                           \
-       }                                                                      \
+       if (_warn && _taken < 0)                                               \
+               dev_WARN(_wait.tmr->subdev.device->dev, "timeout\n");          \
        _taken;                                                                \
 })
 #define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
index e67a471331b514b75321c4bf8a0fff1be7057113..b06cdac8f3a2bf0e0041a865cf3103379f1e11ce 100644 (file)
@@ -306,7 +306,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 
        /* create channel object and initialise dma and fence management */
        ret = nouveau_channel_new(drm, device, init->fb_ctxdma_handle,
-                                 init->tt_ctxdma_handle, &chan->chan);
+                                 init->tt_ctxdma_handle, false, &chan->chan);
        if (ret)
                goto done;
 
index 7214022dfb91187ae519e51905a5512b542abe70..73eff52036d2a9785d65cbfd4f8ab8a4d6824878 100644 (file)
@@ -1141,6 +1141,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
+               {  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+               {  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
index 92d3115f96b5090889281c62edf219bbdb64dd66..668afbc29c3e362b055c9b9bc0f944ff4ebf8e2d 100644 (file)
@@ -29,6 +29,7 @@
 #include <nvif/cl506f.h>
 #include <nvif/cl906f.h>
 #include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
 #include <nvif/ioctl.h>
 
 /*XXX*/
@@ -217,10 +218,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 
 static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
-                   u64 runlist, struct nouveau_channel **pchan)
+                   u64 runlist, bool priv, struct nouveau_channel **pchan)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
-       static const u16 oclasses[] = { VOLTA_CHANNEL_GPFIFO_A,
+       static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+                                       VOLTA_CHANNEL_GPFIFO_A,
                                        PASCAL_CHANNEL_GPFIFO_A,
                                        MAXWELL_CHANNEL_GPFIFO_A,
                                        KEPLER_CHANNEL_GPFIFO_B,
@@ -234,6 +236,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                struct nv50_channel_gpfifo_v0 nv50;
                struct fermi_channel_gpfifo_v0 fermi;
                struct kepler_channel_gpfifo_a_v0 kepler;
+               struct volta_channel_gpfifo_a_v0 volta;
        } args;
        struct nouveau_channel *chan;
        u32 size;
@@ -247,12 +250,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
 
        /* create channel object */
        do {
+               if (oclass[0] >= VOLTA_CHANNEL_GPFIFO_A) {
+                       args.volta.version = 0;
+                       args.volta.ilength = 0x02000;
+                       args.volta.ioffset = 0x10000 + chan->push.addr;
+                       args.volta.runlist = runlist;
+                       args.volta.vmm = nvif_handle(&cli->vmm.vmm.object);
+                       args.volta.priv = priv;
+                       size = sizeof(args.volta);
+               } else
                if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
                        args.kepler.version = 0;
                        args.kepler.ilength = 0x02000;
                        args.kepler.ioffset = 0x10000 + chan->push.addr;
                        args.kepler.runlist = runlist;
                        args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
+                       args.kepler.priv = priv;
                        size = sizeof(args.kepler);
                } else
                if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
@@ -273,13 +286,20 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                ret = nvif_object_init(&device->object, 0, *oclass++,
                                       &args, size, &chan->user);
                if (ret == 0) {
-                       if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+                       if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
+                               chan->chid = args.volta.chid;
+                               chan->inst = args.volta.inst;
+                               chan->token = args.volta.token;
+                       } else
+                       if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) {
                                chan->chid = args.kepler.chid;
-                       else
-                       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+                               chan->inst = args.kepler.inst;
+                       } else
+                       if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
                                chan->chid = args.fermi.chid;
-                       else
+                       } else {
                                chan->chid = args.nv50.chid;
+                       }
                        return ret;
                }
        } while (*oclass);
@@ -448,7 +468,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
 
 int
 nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
-                   u32 arg0, u32 arg1, struct nouveau_channel **pchan)
+                   u32 arg0, u32 arg1, bool priv,
+                   struct nouveau_channel **pchan)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
        bool super;
@@ -458,7 +479,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
        super = cli->base.super;
        cli->base.super = true;
 
-       ret = nouveau_channel_ind(drm, device, arg0, pchan);
+       ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
        if (ret) {
                NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
                ret = nouveau_channel_dma(drm, device, pchan);
index 64454c2ebd908ce82a1b6886ee376dd37d71d489..28418f4e574884376522d6a3d65274231d6eaca5 100644 (file)
@@ -10,6 +10,8 @@ struct nouveau_channel {
        struct nouveau_drm *drm;
 
        int chid;
+       u64 inst;
+       u32 token;
 
        struct nvif_object vram;
        struct nvif_object gart;
@@ -48,7 +50,8 @@ struct nouveau_channel {
 int nouveau_channels_init(struct nouveau_drm *);
 
 int  nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
-                        u32 arg0, u32 arg1, struct nouveau_channel **);
+                        u32 arg0, u32 arg1, bool priv,
+                        struct nouveau_channel **);
 void nouveau_channel_del(struct nouveau_channel **);
 int  nouveau_channel_idle(struct nouveau_channel *);
 
index fd80661dff92691f94c9b91c3799712d04cb9c17..3f463c91314ab6ae6b896a759b1a56b9494807d3 100644 (file)
@@ -403,6 +403,7 @@ nouveau_connector_destroy(struct drm_connector *connector)
        if (nv_connector->aux.transfer) {
                drm_dp_cec_unregister_connector(&nv_connector->aux);
                drm_dp_aux_unregister(&nv_connector->aux);
+               kfree(nv_connector->aux.name);
        }
        kfree(connector);
 }
@@ -1218,7 +1219,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
        case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
        case DCB_CONNECTOR_DMS59_DP0:
        case DCB_CONNECTOR_DMS59_DP1:
-       case DCB_CONNECTOR_DP       : return DRM_MODE_CONNECTOR_DisplayPort;
+       case DCB_CONNECTOR_DP       :
+       case DCB_CONNECTOR_USB_C    : return DRM_MODE_CONNECTOR_DisplayPort;
        case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
        case DCB_CONNECTOR_HDMI_0   :
        case DCB_CONNECTOR_HDMI_1   :
@@ -1232,7 +1234,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
 }
 
 struct drm_connector *
-nouveau_connector_create(struct drm_device *dev, int index)
+nouveau_connector_create(struct drm_device *dev,
+                        const struct dcb_output *dcbe)
 {
        const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
        struct nouveau_drm *drm = nouveau_drm(dev);
@@ -1240,6 +1243,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
        struct nouveau_connector *nv_connector = NULL;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
+       char aux_name[48] = {0};
+       int index = dcbe->connector;
        int type, ret = 0;
        bool dummy;
 
@@ -1342,6 +1347,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
        case DRM_MODE_CONNECTOR_eDP:
                nv_connector->aux.dev = dev->dev;
                nv_connector->aux.transfer = nouveau_connector_aux_xfer;
+               snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
+                        dcbe->hasht, dcbe->hashm);
+               nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
                ret = drm_dp_aux_register(&nv_connector->aux);
                if (ret) {
                        NV_ERROR(drm, "failed to register aux channel\n");
index f57ef35b1e5e6b6b5ce222bfc1f16fa9c757fa31..f43a8d63aef86e07c7078501ff4bad0e33ba937b 100644 (file)
@@ -38,6 +38,7 @@
 #include "nouveau_encoder.h"
 
 struct nvkm_i2c_port;
+struct dcb_output;
 
 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
 struct nouveau_backlight;
@@ -113,7 +114,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 }
 
 struct drm_connector *
-nouveau_connector_create(struct drm_device *, int index);
+nouveau_connector_create(struct drm_device *, const struct dcb_output *);
 
 extern int nouveau_tv_disable;
 extern int nouveau_ignorelid;
index 9109b69cd052958bbc126b4bad4f490720e11f4a..88a52f6b39fe333df24c33dce9aef2535d6a1b09 100644 (file)
@@ -46,6 +46,26 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
        return 0;
 }
 
+static int
+nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+       int ret;
+
+       ret = pm_runtime_get_sync(drm->dev->dev);
+       if (ret < 0 && ret != -EACCES)
+               return ret;
+
+       seq_printf(m, "0x%08x\n",
+                  nvif_rd32(&drm->client.device.object, 0x101000));
+
+       pm_runtime_mark_last_busy(drm->dev->dev);
+       pm_runtime_put_autosuspend(drm->dev->dev);
+
+       return 0;
+}
+
 static int
 nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
 {
@@ -185,7 +205,8 @@ static const struct file_operations nouveau_pstate_fops = {
 };
 
 static struct drm_info_list nouveau_debugfs_list[] = {
-       { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+       { "vbios.rom",  nouveau_debugfs_vbios_image, 0, NULL },
+       { "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
 };
 #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
 
@@ -199,8 +220,9 @@ static const struct nouveau_debugfs_files {
 int
 nouveau_drm_debugfs_init(struct drm_minor *minor)
 {
+       struct nouveau_drm *drm = nouveau_drm(minor->dev);
        struct dentry *dentry;
-       int i;
+       int i, ret;
 
        for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
                dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
@@ -211,9 +233,23 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
                        return -ENOMEM;
        }
 
-       return drm_debugfs_create_files(nouveau_debugfs_list,
-                                       NOUVEAU_DEBUGFS_ENTRIES,
-                                       minor->debugfs_root, minor);
+       ret = drm_debugfs_create_files(nouveau_debugfs_list,
+                                      NOUVEAU_DEBUGFS_ENTRIES,
+                                      minor->debugfs_root, minor);
+       if (ret)
+               return ret;
+
+       /* Set the size of the vbios since we know it, and it's confusing to
+        * userspace if it wants to seek() but the file has a length of 0
+        */
+       dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
+       if (!dentry)
+               return 0;
+
+       d_inode(dentry)->i_size = drm->vbios.length;
+       dput(dentry);
+
+       return 0;
 }
 
 int
index 945afd34138edc06efe504d1be6583c8cb76c050..078f65d849ce4403839a9d857021612c7ce6e7c8 100644 (file)
@@ -101,7 +101,7 @@ nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
 
        nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
        if (user->func && user->func->doorbell)
-               user->func->doorbell(user, chan->chid);
+               user->func->doorbell(user, chan->token);
        chan->dma.ib_free--;
 }
 
index 2b2baf6e0e0d6bbde2aadefd0a16e55aaadf3736..232c3f6bc35b50da1c111ad46fc38232251d7c97 100644 (file)
@@ -353,6 +353,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                case MAXWELL_CHANNEL_GPFIFO_A:
                case PASCAL_CHANNEL_GPFIFO_A:
                case VOLTA_CHANNEL_GPFIFO_A:
+               case TURING_CHANNEL_GPFIFO_A:
                        ret = nvc0_fence_create(drm);
                        break;
                default:
@@ -370,7 +371,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
        if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
                ret = nouveau_channel_new(drm, &drm->client.device,
                                          nvif_fifo_runlist_ce(device), 0,
-                                         &drm->cechan);
+                                         true, &drm->cechan);
                if (ret)
                        NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
@@ -381,7 +382,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
            device->info.chipset != 0xaa &&
            device->info.chipset != 0xac) {
                ret = nouveau_channel_new(drm, &drm->client.device,
-                                         NvDmaFB, NvDmaTT, &drm->cechan);
+                                         NvDmaFB, NvDmaTT, false,
+                                         &drm->cechan);
                if (ret)
                        NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
@@ -393,7 +395,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
        }
 
        ret = nouveau_channel_new(drm, &drm->client.device,
-                                 arg0, arg1, &drm->channel);
+                                 arg0, arg1, false, &drm->channel);
        if (ret) {
                NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
                nouveau_accel_fini(drm);
index 7e3b118cf7c4da6016874aac0c2f2d653db72398..ede872f6f66803873f11dc459c1ff7cea285e9ee 100644 (file)
@@ -25,7 +25,6 @@ void nouveau_vma_unmap(struct nouveau_vma *);
 struct nouveau_vmm {
        struct nouveau_cli *cli;
        struct nvif_vmm vmm;
-       struct nvkm_vm *vm;
 };
 
 int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
index 18c7d064f75c84397e20eba815ec9f97bc7010d9..ef97dd223a32f1e33091a01cc6793d6a6359a018 100644 (file)
@@ -34,6 +34,7 @@ int
 nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
 {
        static const struct nvif_mclass disps[] = {
+               { TU104_DISP, -1 },
                { GV100_DISP, -1 },
                { GP102_DISP, -1 },
                { GP100_DISP, -1 },
index 03f676c18aad5240e654739d6092a62ca5f74e4f..c61b467cf45e1c738dfc66670128d396f01fd4e3 100644 (file)
@@ -79,7 +79,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
        [NVKM_ENGINE_NVENC0  ] = "nvenc0",
        [NVKM_ENGINE_NVENC1  ] = "nvenc1",
        [NVKM_ENGINE_NVENC2  ] = "nvenc2",
-       [NVKM_ENGINE_NVDEC   ] = "nvdec",
+       [NVKM_ENGINE_NVDEC0  ] = "nvdec0",
+       [NVKM_ENGINE_NVDEC1  ] = "nvdec1",
+       [NVKM_ENGINE_NVDEC2  ] = "nvdec2",
        [NVKM_ENGINE_PM      ] = "pm",
        [NVKM_ENGINE_SEC     ] = "sec",
        [NVKM_ENGINE_SEC2    ] = "sec2",
index 80d7844419044da450528a442dc2a58eb8511c7b..177a23301d6ab46d99866db5f1241886e01c6fa5 100644 (file)
@@ -6,3 +6,4 @@ nvkm-y += nvkm/engine/ce/gm200.o
 nvkm-y += nvkm/engine/ce/gp100.o
 nvkm-y += nvkm/engine/ce/gp102.o
 nvkm-y += nvkm/engine/ce/gv100.o
+nvkm-y += nvkm/engine/ce/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
new file mode 100644 (file)
index 0000000..3c25043
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu104_ce = {
+       .intr = gp100_ce_intr,
+       .sclass = {
+               { -1, -1, TURING_DMA_COPY_A },
+               {}
+       }
+};
+
+int
+tu104_ce_new(struct nvkm_device *device, int index,
+            struct nvkm_engine **pengine)
+{
+       return nvkm_engine_new_(&tu104_ce, device, index, true, pengine);
+}
index e294013426ced84844d6950facd1737f63f3205c..bfbc9341e0c21f361746ec81363a687119f97a92 100644 (file)
@@ -2221,7 +2221,7 @@ nv132_chipset = {
        .dma = gf119_dma_new,
        .fifo = gp100_fifo_new,
        .gr = gp102_gr_new,
-       .nvdec = gp102_nvdec_new,
+       .nvdec[0] = gp102_nvdec_new,
        .sec2 = gp102_sec2_new,
        .sw = gf100_sw_new,
 };
@@ -2257,7 +2257,7 @@ nv134_chipset = {
        .dma = gf119_dma_new,
        .fifo = gp100_fifo_new,
        .gr = gp104_gr_new,
-       .nvdec = gp102_nvdec_new,
+       .nvdec[0] = gp102_nvdec_new,
        .sec2 = gp102_sec2_new,
        .sw = gf100_sw_new,
 };
@@ -2293,7 +2293,7 @@ nv136_chipset = {
        .dma = gf119_dma_new,
        .fifo = gp100_fifo_new,
        .gr = gp104_gr_new,
-       .nvdec = gp102_nvdec_new,
+       .nvdec[0] = gp102_nvdec_new,
        .sec2 = gp102_sec2_new,
        .sw = gf100_sw_new,
 };
@@ -2329,7 +2329,7 @@ nv137_chipset = {
        .dma = gf119_dma_new,
        .fifo = gp100_fifo_new,
        .gr = gp107_gr_new,
-       .nvdec = gp102_nvdec_new,
+       .nvdec[0] = gp102_nvdec_new,
        .sec2 = gp102_sec2_new,
        .sw = gf100_sw_new,
 };
@@ -2365,7 +2365,7 @@ nv138_chipset = {
        .dma = gf119_dma_new,
        .fifo = gp100_fifo_new,
        .gr = gp107_gr_new,
-       .nvdec = gp102_nvdec_new,
+       .nvdec[0] = gp102_nvdec_new,
        .sec2 = gp102_sec2_new,
        .sw = gf100_sw_new,
 };
@@ -2430,10 +2430,74 @@ nv140_chipset = {
        .dma = gv100_dma_new,
        .fifo = gv100_fifo_new,
        .gr = gv100_gr_new,
-       .nvdec = gp102_nvdec_new,
+       .nvdec[0] = gp102_nvdec_new,
        .sec2 = gp102_sec2_new,
 };
 
+static const struct nvkm_device_chip
+nv164_chipset = {
+       .name = "TU104",
+       .bar = tu104_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = tu104_devinit_new,
+       .fault = tu104_fault_new,
+       .fb = gv100_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp102_ltc_new,
+       .mc = tu104_mc_new,
+       .mmu = tu104_mmu_new,
+       .pci = gp100_pci_new,
+       .pmu = gp102_pmu_new,
+       .therm = gp100_therm_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = tu104_ce_new,
+       .ce[1] = tu104_ce_new,
+       .ce[2] = tu104_ce_new,
+       .ce[3] = tu104_ce_new,
+       .ce[4] = tu104_ce_new,
+       .disp = tu104_disp_new,
+       .dma = gv100_dma_new,
+       .fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
+nv166_chipset = {
+       .name = "TU106",
+       .bar = tu104_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = tu104_devinit_new,
+       .fault = tu104_fault_new,
+       .fb = gv100_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp102_ltc_new,
+       .mc = tu104_mc_new,
+       .mmu = tu104_mmu_new,
+       .pci = gp100_pci_new,
+       .pmu = gp102_pmu_new,
+       .therm = gp100_therm_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = tu104_ce_new,
+       .ce[1] = tu104_ce_new,
+       .ce[2] = tu104_ce_new,
+       .ce[3] = tu104_ce_new,
+       .ce[4] = tu104_ce_new,
+       .disp = tu104_disp_new,
+       .dma = gv100_dma_new,
+       .fifo = tu104_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -2529,7 +2593,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
        _(NVENC0 , device->nvenc[0],  device->nvenc[0]);
        _(NVENC1 , device->nvenc[1],  device->nvenc[1]);
        _(NVENC2 , device->nvenc[2],  device->nvenc[2]);
-       _(NVDEC  , device->nvdec   , &device->nvdec->engine);
+       _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
+       _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
+       _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
        _(PM     , device->pm      , &device->pm->engine);
        _(SEC    , device->sec     ,  device->sec);
        _(SEC2   , device->sec2    , &device->sec2->engine);
@@ -2791,6 +2857,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x120: device->card_type = GM100; break;
                        case 0x130: device->card_type = GP100; break;
                        case 0x140: device->card_type = GV100; break;
+                       case 0x160: device->card_type = TU100; break;
                        default:
                                break;
                        }
@@ -2883,6 +2950,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x138: device->chip = &nv138_chipset; break;
                case 0x13b: device->chip = &nv13b_chipset; break;
                case 0x140: device->chip = &nv140_chipset; break;
+               case 0x164: device->chip = &nv164_chipset; break;
+               case 0x166: device->chip = &nv166_chipset; break;
                default:
                        nvdev_error(device, "unknown chipset (%08x)\n", boot0);
                        goto done;
@@ -2988,7 +3057,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                _(NVKM_ENGINE_NVENC0  , nvenc[0]);
                _(NVKM_ENGINE_NVENC1  , nvenc[1]);
                _(NVKM_ENGINE_NVENC2  , nvenc[2]);
-               _(NVKM_ENGINE_NVDEC   ,    nvdec);
+               _(NVKM_ENGINE_NVDEC0  , nvdec[0]);
+               _(NVKM_ENGINE_NVDEC1  , nvdec[1]);
+               _(NVKM_ENGINE_NVDEC2  , nvdec[2]);
                _(NVKM_ENGINE_PM      ,       pm);
                _(NVKM_ENGINE_SEC     ,      sec);
                _(NVKM_ENGINE_SEC2    ,     sec2);
index dde6bbafa709f781e776434c374664dff037c9a6..092ddc4ffefac09c51b1ec705ff36f746434040d 100644 (file)
@@ -91,7 +91,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
        case ENGINE_A(MSENC ); break;
        case ENGINE_A(VIC   ); break;
        case ENGINE_A(SEC2  ); break;
-       case ENGINE_A(NVDEC ); break;
+       case ENGINE_B(NVDEC ); break;
        case ENGINE_B(NVENC ); break;
        default:
                args->mthd = NV_DEVICE_INFO_INVALID;
@@ -175,6 +175,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
        case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
        case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
+       case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
        default:
                args->v0.family = 0;
                break;
index 8089ac9a12e2673b252b6377eef4f30afc934dce..c6a257ba43476dec8f43d3d431f765a792ed6abc 100644 (file)
@@ -15,6 +15,7 @@ nvkm-y += nvkm/engine/disp/gm200.o
 nvkm-y += nvkm/engine/disp/gp100.o
 nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
+nvkm-y += nvkm/engine/disp/tu104.o
 nvkm-y += nvkm/engine/disp/vga.o
 
 nvkm-y += nvkm/engine/disp/head.o
@@ -38,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgk104.o
 nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/sorgv100.o
+nvkm-y += nvkm/engine/disp/sortu104.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/dp.o
@@ -69,6 +71,7 @@ nvkm-y += nvkm/engine/disp/rootgm200.o
 nvkm-y += nvkm/engine/disp/rootgp100.o
 nvkm-y += nvkm/engine/disp/rootgp102.o
 nvkm-y += nvkm/engine/disp/rootgv100.o
+nvkm-y += nvkm/engine/disp/roottu104.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
index d0a7e3456da1661029b48600f90562be1dac6faf..47be0ba4aebe2df17f6412160c10cdad417e1447 100644 (file)
@@ -28,7 +28,7 @@
 #include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-static int
+int
 gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
@@ -36,7 +36,7 @@ gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
        return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
 }
 
-static void
+void
 gv100_disp_super(struct work_struct *work)
 {
        struct nv50_disp *disp =
@@ -257,7 +257,7 @@ gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
        }
 }
 
-static void
+void
 gv100_disp_intr(struct nv50_disp *disp)
 {
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -297,7 +297,7 @@ gv100_disp_intr(struct nv50_disp *disp)
                nvkm_warn(subdev, "intr %08x\n", stat);
 }
 
-static void
+void
 gv100_disp_fini(struct nv50_disp *disp)
 {
        struct nvkm_device *device = disp->base.engine.subdev.device;
index 0f0c86c32ec3affd8472948a7232cc17647dd028..790e42f460fdc78a2e83cc249cf373b84ee72bd3 100644 (file)
@@ -144,6 +144,11 @@ void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
 int gm200_sor_route_get(struct nvkm_outp *, int *);
 void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
 
+void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
+void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
+void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
+void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+
 void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -195,4 +200,6 @@ int gm200_sor_new(struct nvkm_disp *, int);
 
 int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gv100_sor_new(struct nvkm_disp *, int);
+
+int tu104_sor_new(struct nvkm_disp *, int);
 #endif
index 8580382ab248949ebf8e21dd28cada7b34055a17..c36a8a7cafa16184585330ddfb7b89c490b4f4dc 100644 (file)
@@ -78,6 +78,11 @@ void gf119_disp_intr(struct nv50_disp *);
 void gf119_disp_super(struct work_struct *);
 void gf119_disp_intr_error(struct nv50_disp *, int);
 
+void gv100_disp_fini(struct nv50_disp *);
+void gv100_disp_intr(struct nv50_disp *);
+void gv100_disp_super(struct work_struct *);
+int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+
 void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_update_sppll1(struct nv50_disp *);
index 6ca4f9184b51528b15875b8f8f05fa59435aef59..97de928cbde131b4a4a7176361f24fd964006f8c 100644 (file)
@@ -37,4 +37,5 @@ extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
 extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
+extern const struct nvkm_disp_oclass tu104_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
new file mode 100644 (file)
index 0000000..ad438c6
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+tu104_disp_root = {
+       .user = {
+               {{0,0,TU104_DISP_CURSOR                }, gv100_disp_curs_new },
+               {{0,0,TU104_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+               {{0,0,TU104_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
+               {{0,0,TU104_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
+               {}
+       },
+};
+
+static int
+tu104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&tu104_disp_root, disp, oclass,
+                                  data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+tu104_disp_root_oclass = {
+       .base.oclass = TU104_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = tu104_disp_root_new,
+};
index 8ba881a729eecd6c89dda224313615bab3f17e1f..b0597ff9a7149fb256fa259caf50a78d2c5f89cd 100644 (file)
@@ -23,7 +23,7 @@
 
 #include <subdev/timer.h>
 
-static void
+void
 gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
 {
        struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -31,7 +31,7 @@ gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
        nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
 }
 
-static void
+void
 gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
 {
        struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -40,7 +40,7 @@ gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
        nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
 }
 
-static void
+void
 gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
 {
        struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -54,7 +54,7 @@ gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
        );
 }
 
-static void
+void
 gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
 {
        struct nvkm_device *device = sor->disp->engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
new file mode 100644 (file)
index 0000000..df026a5
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
+                 u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 hoff = head * 0x800;
+
+       nvkm_mask(device, 0x61657c + hoff, 0xffffffff, (aligned << 16) | pbn);
+       nvkm_mask(device, 0x616578 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+}
+
+static int
+tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 soff = nv50_ior_base(sor);
+       const u32 loff = nv50_sor_link(sor);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+
+       clksor |= sor->dp.bw << 18;
+       dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+       if (sor->dp.mst)
+               dpctrl |= 0x40000000;
+       if (sor->dp.ef)
+               dpctrl |= 0x00004000;
+
+       nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+       /*XXX*/
+       nvkm_msec(device, 40, NVKM_DELAY);
+       nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+       nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+       nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+       return 0;
+}
+
+static const struct nvkm_ior_func
+tu104_sor = {
+       .route = {
+               .get = gm200_sor_route_get,
+               .set = gm200_sor_route_set,
+       },
+       .state = gv100_sor_state,
+       .power = nv50_sor_power,
+       .clock = gf119_sor_clock,
+       .hdmi = {
+               .ctrl = gv100_hdmi_ctrl,
+       },
+       .dp = {
+               .lanes = { 0, 1, 2, 3 },
+               .links = tu104_sor_dp_links,
+               .power = g94_sor_dp_power,
+               .pattern = gm107_sor_dp_pattern,
+               .drive = gm200_sor_dp_drive,
+               .vcpi = tu104_sor_dp_vcpi,
+               .audio = gv100_sor_dp_audio,
+               .audio_sym = gv100_sor_dp_audio_sym,
+               .watermark = gv100_sor_dp_watermark,
+       },
+       .hda = {
+               .hpd = gf119_hda_hpd,
+               .eld = gf119_hda_eld,
+       },
+};
+
+int
+tu104_sor_new(struct nvkm_disp *disp, int id)
+{
+       return nvkm_ior_new_(&tu104_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
new file mode 100644 (file)
index 0000000..13fa214
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+
+static int
+tu104_disp_init(struct nv50_disp *disp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       struct nvkm_head *head;
+       int i, j;
+       u32 tmp;
+
+       /* Claim ownership of display. */
+       if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
+               nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
+               if (nvkm_msec(device, 2000,
+                       if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
+                               break;
+               ) < 0)
+                       return -EBUSY;
+       }
+
+       /* Lock pin capabilities. */
+       tmp = 0x00000021; /*XXX*/
+       nvkm_wr32(device, 0x640008, tmp);
+
+       /* SOR capabilities. */
+       for (i = 0; i < disp->sor.nr; i++) {
+               tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+               nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
+               nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
+       }
+
+       /* Head capabilities. */
+       list_for_each_entry(head, &disp->base.head, head) {
+               const int id = head->id;
+
+               /* RG. */
+               tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
+               nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
+
+               /* POSTCOMP. */
+               for (j = 0; j < 5 * 4; j += 4) {
+                       tmp = nvkm_rd32(device, 0x616140 + (id * 0x800) + j);
+                       nvkm_wr32(device, 0x640680 + (id * 0x20) + j, tmp);
+               }
+       }
+
+       /* Window capabilities. */
+       for (i = 0; i < disp->wndw.nr; i++) {
+               nvkm_mask(device, 0x640004, 1 << i, 1 << i);
+               for (j = 0; j < 6 * 4; j += 4) {
+                       tmp = nvkm_rd32(device, 0x630100 + (i * 0x800) + j);
+                       nvkm_mask(device, 0x640780 + (i * 0x20) + j, 0xffffffff, tmp);
+               }
+               nvkm_mask(device, 0x64000c, 0x00000100, 0x00000100);
+       }
+
+       /* IHUB capabilities. */
+       for (i = 0; i < 3; i++) {
+               tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
+               nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
+       }
+
+       nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
+
+       /* Setup instance memory. */
+       switch (nvkm_memory_target(disp->inst->memory)) {
+       case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
+       case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
+       case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
+       default:
+               break;
+       }
+       nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
+       nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
+
+       /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
+       nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
+       nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
+
+       /* EXC_OTHER: CURSn, CORE. */
+       nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
+                                   0x00000001); /* MSK. */
+       nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
+
+       /* EXC_WINIM. */
+       nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
+       nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
+
+       /* EXC_WIN. */
+       nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
+       nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
+
+       /* HEAD_TIMING(n): VBLANK. */
+       list_for_each_entry(head, &disp->base.head, head) {
+               const u32 hoff = head->id * 4;
+               nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
+               nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
+       }
+
+       /* OR. */
+       nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
+       nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
+       return 0;
+}
+
+static const struct nv50_disp_func
+tu104_disp = {
+       .init = tu104_disp_init,
+       .fini = gv100_disp_fini,
+       .intr = gv100_disp_intr,
+       .uevent = &gv100_disp_chan_uevent,
+       .super = gv100_disp_super,
+       .root = &tu104_disp_root_oclass,
+       .wndw = { .cnt = gv100_disp_wndw_cnt },
+       .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+       .sor = { .cnt = gv100_sor_cnt, .new = tu104_sor_new },
+       .ramht_size = 0x2000,
+};
+
+int
+tu104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return nv50_disp_new_(&tu104_disp, device, index, pdisp);
+}
index 98911805aabf6375852f9bd00df89433fce2387f..5d3b641dbb141decd8f187f038ad99d0737e8413 100644 (file)
@@ -118,7 +118,7 @@ gv100_disp_wndw_mthd_base = {
 
 const struct nv50_disp_chan_mthd
 gv100_disp_wndw_mthd = {
-       .name = "Base",
+       .name = "Window",
        .addr = 0x001000,
        .prev = 0x000800,
        .data = {
index f00408577a6afaac015bb13b2a3c16d6241680f1..87d8e054e40ae4d8ff3ab30d870363b0e28afbeb 100644 (file)
@@ -16,6 +16,7 @@ nvkm-y += nvkm/engine/fifo/gm20b.o
 nvkm-y += nvkm/engine/fifo/gp100.o
 nvkm-y += nvkm/engine/fifo/gp10b.o
 nvkm-y += nvkm/engine/fifo/gv100.o
+nvkm-y += nvkm/engine/fifo/tu104.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
@@ -33,5 +34,7 @@ nvkm-y += nvkm/engine/fifo/gpfifog84.o
 nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
 nvkm-y += nvkm/engine/fifo/gpfifogv100.o
+nvkm-y += nvkm/engine/fifo/gpfifotu104.o
 
 nvkm-y += nvkm/engine/fifo/usergv100.o
+nvkm-y += nvkm/engine/fifo/usertu104.o
index 3ffef236189e6e1781af17a34cae7692ec2f4a1d..2c7c5afc1ea5675ab4dcf44d73400393e71a3f29 100644 (file)
@@ -17,6 +17,7 @@ struct nvkm_fifo_chan_func {
                            bool suspend);
        int  (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
        void (*object_dtor)(struct nvkm_fifo_chan *, int);
+       u32 (*submit_token)(struct nvkm_fifo_chan *);
 };
 
 int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
index 8e28ba6b23074bdfb6cf790b9c18d7adf31996b3..a14545d871d8efc2c3d352d567ef618d3c3ff8b5 100644 (file)
@@ -14,6 +14,8 @@ struct gk104_fifo_chan {
        struct list_head head;
        bool killed;
 
+       struct nvkm_memory *mthd;
+
        struct {
                struct nvkm_gpuobj *inst;
                struct nvkm_vma *vma;
@@ -36,4 +38,15 @@ int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
 
 int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
                          void *data, u32 size, struct nvkm_object **);
+int gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *,
+                          struct gk104_fifo *, u64 *, u16 *, u64, u64, u64,
+                          u64 *, bool, u32 *, const struct nvkm_oclass *,
+                          struct nvkm_object **);
+int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
+                                 struct nvkm_engine *);
+int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
+                                 struct nvkm_engine *, bool);
+
+int tu104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+                         void *data, u32 size, struct nvkm_object **);
 #endif
index f6957686816433adbddee81f7e8c37fa694ee374..10a2e7039a7522a51b1d05326e8fe93db61bb1e5 100644 (file)
@@ -346,10 +346,10 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
        if (eu && eu->data2) {
                switch (eu->data2) {
                case NVKM_SUBDEV_BAR:
-                       nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+                       nvkm_bar_bar1_reset(device);
                        break;
                case NVKM_SUBDEV_INSTMEM:
-                       nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+                       nvkm_bar_bar2_reset(device);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
index afccf9721cf0af3c7759cba6d406ac37c6c96d33..1053fe7964661ce809b4ab78e87ec04049d9180f 100644 (file)
@@ -149,16 +149,41 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
 }
 
 void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+                         struct nvkm_memory *mem, int nr)
+{
+       struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       int target;
+
+       switch (nvkm_memory_target(mem)) {
+       case NVKM_MEM_TARGET_VRAM: target = 0; break;
+       case NVKM_MEM_TARGET_NCOH: target = 3; break;
+       default:
+               WARN_ON(1);
+               return;
+       }
+
+       nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+                                   (target << 28));
+       nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+       if (nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
+                       break;
+       ) < 0)
+               nvkm_error(subdev, "runlist %d update timeout\n", runl);
+}
+
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
 {
        const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
        struct gk104_fifo_chan *chan;
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-       struct nvkm_device *device = subdev->device;
        struct nvkm_memory *mem;
        struct nvkm_fifo_cgrp *cgrp;
        int nr = 0;
-       int target;
 
        mutex_lock(&subdev->mutex);
        mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
@@ -177,24 +202,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
        }
        nvkm_done(mem);
 
-       switch (nvkm_memory_target(mem)) {
-       case NVKM_MEM_TARGET_VRAM: target = 0; break;
-       case NVKM_MEM_TARGET_NCOH: target = 3; break;
-       default:
-               WARN_ON(1);
-               goto unlock;
-       }
-
-       nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
-                                   (target << 28));
-       nvkm_wr32(device, 0x002274, (runl << 20) | nr);
-
-       if (nvkm_msec(device, 2000,
-               if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
-                       break;
-       ) < 0)
-               nvkm_error(subdev, "runlist %d update timeout\n", runl);
-unlock:
+       func->commit(fifo, runl, mem, nr);
        mutex_unlock(&subdev->mutex);
 }
 
@@ -238,6 +246,29 @@ const struct gk104_fifo_runlist_func
 gk104_fifo_runlist = {
        .size = 8,
        .chan = gk104_fifo_runlist_chan,
+       .commit = gk104_fifo_runlist_commit,
+};
+
+void
+gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+}
+
+int
+gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       /* Determine number of PBDMAs by checking valid enable bits. */
+       nvkm_wr32(device, 0x000204, 0xffffffff);
+       return hweight32(nvkm_rd32(device, 0x000204));
+}
+
+const struct gk104_fifo_pbdma_func
+gk104_fifo_pbdma = {
+       .nr = gk104_fifo_pbdma_nr,
+       .init = gk104_fifo_pbdma_init,
 };
 
 static void
@@ -267,7 +298,7 @@ gk104_fifo_recover_work(struct work_struct *w)
        }
 
        for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
-               gk104_fifo_runlist_commit(fifo, runl);
+               gk104_fifo_runlist_update(fifo, runl);
 
        nvkm_wr32(device, 0x00262c, runm);
        nvkm_mask(device, 0x002630, runm, 0x00000000);
@@ -456,10 +487,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
        if (ee && ee->data2) {
                switch (ee->data2) {
                case NVKM_SUBDEV_BAR:
-                       nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+                       nvkm_bar_bar1_reset(device);
                        break;
                case NVKM_SUBDEV_INSTMEM:
-                       nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+                       nvkm_bar_bar2_reset(device);
                        break;
                case NVKM_ENGINE_IFB:
                        nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
@@ -904,9 +935,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
        enum nvkm_devidx engidx;
        u32 *map;
 
-       /* Determine number of PBDMAs by checking valid enable bits. */
-       nvkm_wr32(device, 0x000204, 0xffffffff);
-       fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
+       fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
        nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
 
        /* Read PBDMA->runlist(s) mapping from HW. */
@@ -978,7 +1007,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
        int i;
 
        /* Enable PBDMAs. */
-       nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+       fifo->func->pbdma->init(fifo);
 
        /* PBDMA[n] */
        for (i = 0; i < fifo->pbdma_nr; i++) {
@@ -995,8 +1024,8 @@ gk104_fifo_init(struct nvkm_fifo *base)
 
        nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
 
-       if (fifo->func->init_pbdma_timeout)
-               fifo->func->init_pbdma_timeout(fifo);
+       if (fifo->func->pbdma->init_timeout)
+               fifo->func->pbdma->init_timeout(fifo);
 
        nvkm_wr32(device, 0x002100, 0xffffffff);
        nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -1175,6 +1204,7 @@ gk104_fifo_fault_gpcclient[] = {
 
 static const struct gk104_fifo_func
 gk104_fifo = {
+       .pbdma = &gk104_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gk104_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index d295b81e18d6c398ed115fd286ee253fd25ca634..d4e565658f46af8ae2bd502b6347e0ba8358cd45 100644 (file)
@@ -45,7 +45,11 @@ struct gk104_fifo {
 };
 
 struct gk104_fifo_func {
-       void (*init_pbdma_timeout)(struct gk104_fifo *);
+       const struct gk104_fifo_pbdma_func {
+               int (*nr)(struct gk104_fifo *);
+               void (*init)(struct gk104_fifo *);
+               void (*init_timeout)(struct gk104_fifo *);
+       } *pbdma;
 
        struct {
                const struct nvkm_enum *access;
@@ -61,6 +65,8 @@ struct gk104_fifo_func {
                             struct nvkm_memory *, u32 offset);
                void (*chan)(struct gk104_fifo_chan *,
                             struct nvkm_memory *, u32 offset);
+               void (*commit)(struct gk104_fifo *, int runl,
+                              struct nvkm_memory *, int entries);
        } *runlist;
 
        struct gk104_fifo_user_user {
@@ -81,8 +87,11 @@ int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
                    int index, int nr, struct nvkm_fifo **);
 void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
 void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
+void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
 
+extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
+int gk104_fifo_pbdma_nr(struct gk104_fifo *);
+void gk104_fifo_pbdma_init(struct gk104_fifo *);
 extern const struct nvkm_enum gk104_fifo_fault_access[];
 extern const struct nvkm_enum gk104_fifo_fault_engine[];
 extern const struct nvkm_enum gk104_fifo_fault_reason[];
@@ -91,15 +100,30 @@ extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
 extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
 void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
                             struct nvkm_memory *, u32);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl,
+                              struct nvkm_memory *, int);
 
 extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
 void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
                             struct nvkm_memory *, u32);
 
-void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
+extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
+void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
 
 extern const struct nvkm_enum gm107_fifo_fault_engine[];
 extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
 
+extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
+int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+
 extern const struct nvkm_enum gp100_fifo_fault_engine[];
+
+extern const struct nvkm_enum gv100_fifo_fault_access[];
+extern const struct nvkm_enum gv100_fifo_fault_reason[];
+extern const struct nvkm_enum gv100_fifo_fault_hubclient[];
+extern const struct nvkm_enum gv100_fifo_fault_gpcclient[];
+void gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+                            struct nvkm_memory *, u32);
+void gv100_fifo_runlist_chan(struct gk104_fifo_chan *,
+                            struct nvkm_memory *, u32);
 #endif
index ac7655a130fbdf0c78857c54593fb969db166068..8adfa6b182cbab9b703bee09a9bdb89014fa7656 100644 (file)
@@ -43,10 +43,12 @@ gk110_fifo_runlist = {
        .size = 8,
        .cgrp = gk110_fifo_runlist_cgrp,
        .chan = gk104_fifo_runlist_chan,
+       .commit = gk104_fifo_runlist_commit,
 };
 
 static const struct gk104_fifo_func
 gk110_fifo = {
+       .pbdma = &gk104_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gk104_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 5ea7e452cc660d4f2120be5194ad2d9be2848891..9553fb4af601f033e37b6bdc8878bf9fa6e9f727 100644 (file)
@@ -27,7 +27,7 @@
 #include <nvif/class.h>
 
 void
-gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
+gk208_fifo_pbdma_init_timeout(struct gk104_fifo *fifo)
 {
        struct nvkm_device *device = fifo->base.engine.subdev.device;
        int i;
@@ -36,9 +36,16 @@ gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
                nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
 }
 
+const struct gk104_fifo_pbdma_func
+gk208_fifo_pbdma = {
+       .nr = gk104_fifo_pbdma_nr,
+       .init = gk104_fifo_pbdma_init,
+       .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
 static const struct gk104_fifo_func
 gk208_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gk208_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gk104_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 535a0eb67a5fc83eda731e5fc60ca0853ba41206..a4c6ac3cd6c70a1e8d5cef24e01ea33f5c30b871 100644 (file)
@@ -26,7 +26,7 @@
 
 static const struct gk104_fifo_func
 gk20a_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gk208_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gk104_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 79ae19b1db673c6b0f95b1f6115166597c549e84..acf230764cb0bb966e915d6826e8ddeb4561f2e0 100644 (file)
@@ -41,6 +41,7 @@ gm107_fifo_runlist = {
        .size = 8,
        .cgrp = gk110_fifo_runlist_cgrp,
        .chan = gm107_fifo_runlist_chan,
+       .commit = gk104_fifo_runlist_commit,
 };
 
 const struct nvkm_enum
@@ -68,7 +69,7 @@ gm107_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gm107_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gk208_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gm107_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 49565faa854d06c72aff1259d182e1c7105004c1..b96c1c5d6577f0a12a1494077fef43d76ba23cfe 100644 (file)
 
 #include <nvif/class.h>
 
+int
+gm200_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       return nvkm_rd32(device, 0x002004) & 0x000000ff;
+}
+
+const struct gk104_fifo_pbdma_func
+gm200_fifo_pbdma = {
+       .nr = gm200_fifo_pbdma_nr,
+       .init = gk104_fifo_pbdma_init,
+       .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
 static const struct gk104_fifo_func
 gm200_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gm200_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gm107_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 46736513bd11a5bbd228f41efd8ee93e9ce86f2f..a49539b9e4ec328f24d7d3b1e4aa798b19ae92c2 100644 (file)
@@ -26,7 +26,7 @@
 
 static const struct gk104_fifo_func
 gm20b_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gm200_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gm107_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index e2f8f9087d7c8dd03501c4202b3f3a5eb674a046..54377e0f6a88fb1957beca80c28e062dab08009c 100644 (file)
@@ -52,7 +52,7 @@ gp100_fifo_fault_engine[] = {
 
 static const struct gk104_fifo_func
 gp100_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gm200_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gp100_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 7733bf7c6545cf03027dce6a4fd18ed9d43550d7..778ba7e46fb36f1d10ea48d9c651df2a2bd7520a 100644 (file)
@@ -26,7 +26,7 @@
 
 static const struct gk104_fifo_func
 gp10b_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gm200_fifo_pbdma,
        .fault.access = gk104_fifo_fault_access,
        .fault.engine = gp100_fifo_fault_engine,
        .fault.reason = gk104_fifo_fault_reason,
index 118b37aea318f94b4f4af80bb476a10136b1d495..728a1edbf98c8cce3f7cb81ea8912b8097d293b5 100644 (file)
@@ -85,7 +85,7 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
        case NVKM_ENGINE_MSVLD : return 0x0270;
        case NVKM_ENGINE_VIC   : return 0x0280;
        case NVKM_ENGINE_MSENC : return 0x0290;
-       case NVKM_ENGINE_NVDEC : return 0x02100270;
+       case NVKM_ENGINE_NVDEC0: return 0x02100270;
        case NVKM_ENGINE_NVENC0: return 0x02100290;
        case NVKM_ENGINE_NVENC1: return 0x0210;
        default:
@@ -192,7 +192,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
                gk104_fifo_runlist_remove(fifo, chan);
                nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
                gk104_fifo_gpfifo_kick(chan);
-               gk104_fifo_runlist_commit(fifo, chan->runl);
+               gk104_fifo_runlist_update(fifo, chan->runl);
        }
 
        nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -213,7 +213,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
        if (list_empty(&chan->head) && !chan->killed) {
                gk104_fifo_runlist_insert(fifo, chan);
                nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
-               gk104_fifo_runlist_commit(fifo, chan->runl);
+               gk104_fifo_runlist_update(fifo, chan->runl);
                nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
        }
 }
@@ -222,6 +222,7 @@ void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
        struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+       nvkm_memory_unref(&chan->mthd);
        kfree(chan->cgrp);
        return chan;
 }
@@ -240,7 +241,7 @@ gk104_fifo_gpfifo_func = {
 
 static int
 gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
-                      u64 vmm, u64 ioffset, u64 ilength,
+                      u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
                       const struct nvkm_oclass *oclass,
                       struct nvkm_object **pobject)
 {
@@ -279,6 +280,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
                return ret;
 
        *chid = chan->base.chid;
+       *inst = chan->base.inst->addr;
 
        /* Hack to support GPUs where even individual channels should be
         * part of a channel group.
@@ -315,6 +317,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
        nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
        nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
        nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+       nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
        nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
        nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
        nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
@@ -337,15 +340,19 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
                                   "ioffset %016llx ilength %08x "
-                                  "runlist %016llx\n",
+                                  "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
-                          args->v0.ilength, args->v0.runlist);
+                          args->v0.ilength, args->v0.runlist, args->v0.priv);
+               if (args->v0.priv && !oclass->client->super)
+                       return -EINVAL;
                return gk104_fifo_gpfifo_new_(fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
                                               args->v0.vmm,
                                               args->v0.ioffset,
                                               args->v0.ilength,
+                                             &args->v0.inst,
+                                              args->v0.priv,
                                              oclass, pobject);
        }
 
index 9598853ced56244285591c732bcd0cd4e44e0c4a..a7462cf59d65cb1c6afc5388d5f3f9e211db64d4 100644 (file)
 #include <core/client.h>
 #include <core/gpuobj.h>
 
-#include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
 #include <nvif/unpack.h>
 
+static u32
+gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
+{
+       return chan->chid;
+}
+
 static int
 gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
 {
@@ -56,7 +62,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
        return ret;
 }
 
-static int
+int
 gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
                              struct nvkm_engine *engine, bool suspend)
 {
@@ -79,7 +85,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
        return ret;
 }
 
-static int
+int
 gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
                              struct nvkm_engine *engine)
 {
@@ -100,8 +106,8 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
        return gv100_fifo_gpfifo_engine_valid(chan, false, true);
 }
 
-const struct nvkm_fifo_chan_func
-gv100_fifo_gpfifo_func = {
+static const struct nvkm_fifo_chan_func
+gv100_fifo_gpfifo = {
        .dtor = gk104_fifo_gpfifo_dtor,
        .init = gk104_fifo_gpfifo_init,
        .fini = gk104_fifo_gpfifo_fini,
@@ -110,19 +116,23 @@ gv100_fifo_gpfifo_func = {
        .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
        .engine_init = gv100_fifo_gpfifo_engine_init,
        .engine_fini = gv100_fifo_gpfifo_engine_fini,
+       .submit_token = gv100_fifo_gpfifo_submit_token,
 };
 
-static int
-gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
-                      u64 vmm, u64 ioffset, u64 ilength,
-                      const struct nvkm_oclass *oclass,
+int
+gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
+                      struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+                      u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
+                      u32 *token, const struct nvkm_oclass *oclass,
                       struct nvkm_object **pobject)
 {
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
        struct gk104_fifo_chan *chan;
        int runlist = ffs(*runlists) -1, ret, i;
        unsigned long engm;
        u64 subdevs = 0;
-       u64 usermem;
+       u64 usermem, mthd;
+       u32 size;
 
        if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
                return -EINVAL;
@@ -142,14 +152,15 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
        chan->runl = runlist;
        INIT_LIST_HEAD(&chan->head);
 
-       ret = nvkm_fifo_chan_ctor(&gv100_fifo_gpfifo_func, &fifo->base,
-                                 0x1000, 0x1000, true, vmm, 0, subdevs,
-                                 1, fifo->user.bar->addr, 0x200,
+       ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
+                                 0, subdevs, 1, fifo->user.bar->addr, 0x200,
                                  oclass, &chan->base);
        if (ret)
                return ret;
 
        *chid = chan->base.chid;
+       *inst = chan->base.inst->addr;
+       *token = chan->base.func->submit_token(&chan->base);
 
        /* Hack to support GPUs where even individual channels should be
         * part of a channel group.
@@ -173,6 +184,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
        nvkm_done(fifo->user.mem);
        usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
+       /* Allocate fault method buffer (magics come from nvgpu). */
+       size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+       size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+       size = roundup(size, PAGE_SIZE);
+
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
+                             &chan->mthd);
+       if (ret)
+               return ret;
+
+       mthd = nvkm_memory_bar2(chan->mthd);
+       if (mthd == ~0ULL)
+               return -EFAULT;
+
        /* RAMFC */
        nvkm_kmap(chan->base.inst);
        nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -184,13 +209,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
                                          (ilength << 16));
        nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
        nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
-       nvkm_wo32(chan->base.inst, 0x0e4, 0x00000020);
+       nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
        nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
-       nvkm_wo32(chan->base.inst, 0x0f4, 0x00001100);
+       nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
        nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
        nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
-       nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
-       nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+       nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
+       nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
        nvkm_done(chan->base.inst);
        return gv100_fifo_gpfifo_engine_valid(chan, true, true);
 }
@@ -201,7 +226,7 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
 {
        struct nvkm_object *parent = oclass->parent;
        union {
-               struct kepler_channel_gpfifo_a_v0 v0;
+               struct volta_channel_gpfifo_a_v0 v0;
        } *args = data;
        int ret = -ENOSYS;
 
@@ -209,15 +234,20 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
                nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
                                   "ioffset %016llx ilength %08x "
-                                  "runlist %016llx\n",
+                                  "runlist %016llx priv %d\n",
                           args->v0.version, args->v0.vmm, args->v0.ioffset,
-                          args->v0.ilength, args->v0.runlist);
-               return gv100_fifo_gpfifo_new_(fifo,
+                          args->v0.ilength, args->v0.runlist, args->v0.priv);
+               if (args->v0.priv && !oclass->client->super)
+                       return -EINVAL;
+               return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
                                              &args->v0.runlist,
                                              &args->v0.chid,
                                               args->v0.vmm,
                                               args->v0.ioffset,
                                               args->v0.ilength,
+                                             &args->v0.inst,
+                                              args->v0.priv,
+                                             &args->v0.token,
                                              oclass, pobject);
        }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
new file mode 100644 (file)
index 0000000..ff70484
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/clc36f.h>
+#include <nvif/unpack.h>
+
+static u32
+tu104_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+{
+       struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+       return (chan->runl << 16) | chan->base.chid;
+}
+
+static const struct nvkm_fifo_chan_func
+tu104_fifo_gpfifo = {
+       .dtor = gk104_fifo_gpfifo_dtor,
+       .init = gk104_fifo_gpfifo_init,
+       .fini = gk104_fifo_gpfifo_fini,
+       .ntfy = gf100_fifo_chan_ntfy,
+       .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+       .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+       .engine_init = gv100_fifo_gpfifo_engine_init,
+       .engine_fini = gv100_fifo_gpfifo_engine_fini,
+       .submit_token = tu104_fifo_gpfifo_submit_token,
+};
+
+int
+tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+                     void *data, u32 size, struct nvkm_object **pobject)
+{
+       struct nvkm_object *parent = oclass->parent;
+       union {
+               struct volta_channel_gpfifo_a_v0 v0;
+       } *args = data;
+       int ret = -ENOSYS;
+
+       nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+       if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+               nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+                                  "ioffset %016llx ilength %08x "
+                                  "runlist %016llx priv %d\n",
+                          args->v0.version, args->v0.vmm, args->v0.ioffset,
+                          args->v0.ilength, args->v0.runlist, args->v0.priv);
+               if (args->v0.priv && !oclass->client->super)
+                       return -EINVAL;
+               return gv100_fifo_gpfifo_new_(&tu104_fifo_gpfifo, fifo,
+                                             &args->v0.runlist,
+                                             &args->v0.chid,
+                                              args->v0.vmm,
+                                              args->v0.ioffset,
+                                              args->v0.ilength,
+                                             &args->v0.inst,
+                                              args->v0.priv,
+                                             &args->v0.token,
+                                             oclass, pobject);
+       }
+
+       return ret;
+}
index 4e1d159c0ae7b1981bdffc099b9ecae6b5a4bf03..6ee1bb32a071c0bd9c51e0cf3063049d206e06f2 100644 (file)
@@ -28,7 +28,7 @@
 
 #include <nvif/class.h>
 
-static void
+void
 gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
                        struct nvkm_memory *memory, u32 offset)
 {
@@ -42,7 +42,7 @@ gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
        nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
 }
 
-static void
+void
 gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
                        struct nvkm_memory *memory, u32 offset)
 {
@@ -57,9 +57,10 @@ gv100_fifo_runlist = {
        .size = 16,
        .cgrp = gv100_fifo_runlist_cgrp,
        .chan = gv100_fifo_runlist_chan,
+       .commit = gk104_fifo_runlist_commit,
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_gpcclient[] = {
        { 0x00, "T1_0" },
        { 0x01, "T1_1" },
@@ -161,7 +162,7 @@ gv100_fifo_fault_gpcclient[] = {
        {}
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_hubclient[] = {
        { 0x00, "VIP" },
        { 0x01, "CE0" },
@@ -223,7 +224,7 @@ gv100_fifo_fault_hubclient[] = {
        {}
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_reason[] = {
        { 0x00, "PDE" },
        { 0x01, "PDE_SIZE" },
@@ -271,7 +272,7 @@ gv100_fifo_fault_engine[] = {
        {}
 };
 
-static const struct nvkm_enum
+const struct nvkm_enum
 gv100_fifo_fault_access[] = {
        { 0x0, "VIRT_READ" },
        { 0x1, "VIRT_WRITE" },
@@ -287,7 +288,7 @@ gv100_fifo_fault_access[] = {
 
 static const struct gk104_fifo_func
 gv100_fifo = {
-       .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+       .pbdma = &gm200_fifo_pbdma,
        .fault.access = gv100_fifo_fault_access,
        .fault.engine = gv100_fifo_fault_engine,
        .fault.reason = gv100_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
new file mode 100644 (file)
index 0000000..98c8070
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/gpuobj.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+                         struct nvkm_memory *mem, int nr)
+{
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       u64 addr = nvkm_memory_addr(mem);
+       /*XXX: target? */
+
+       nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
+       nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
+       nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
+
+       /*XXX: how to wait? can you even wait? */
+}
+
+const struct gk104_fifo_runlist_func
+tu104_fifo_runlist = {
+       .size = 16,
+       .cgrp = gv100_fifo_runlist_cgrp,
+       .chan = gv100_fifo_runlist_chan,
+       .commit = tu104_fifo_runlist_commit,
+};
+
+static const struct nvkm_enum
+tu104_fifo_fault_engine[] = {
+       { 0x01, "DISPLAY" },
+       { 0x03, "PTP" },
+       { 0x06, "PWR_PMU" },
+       { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+       { 0x09, "PERF" },
+       { 0x1f, "PHYSICAL" },
+       { 0x20, "HOST0" },
+       { 0x21, "HOST1" },
+       { 0x22, "HOST2" },
+       { 0x23, "HOST3" },
+       { 0x24, "HOST4" },
+       { 0x25, "HOST5" },
+       { 0x26, "HOST6" },
+       { 0x27, "HOST7" },
+       { 0x28, "HOST8" },
+       { 0x29, "HOST9" },
+       { 0x2a, "HOST10" },
+       { 0x2b, "HOST11" },
+       { 0x2c, "HOST12" },
+       { 0x2d, "HOST13" },
+       { 0x2e, "HOST14" },
+       { 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
+       { 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+       {}
+};
+
+static void
+tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       const u32 mask = (1 << fifo->pbdma_nr) - 1;
+       /*XXX: this is a bit of a guess at this point in time. */
+       nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);
+}
+
+static const struct gk104_fifo_pbdma_func
+tu104_fifo_pbdma = {
+       .nr = gm200_fifo_pbdma_nr,
+       .init = tu104_fifo_pbdma_init,
+       .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+tu104_fifo = {
+       .pbdma = &tu104_fifo_pbdma,
+       .fault.access = gv100_fifo_fault_access,
+       .fault.engine = tu104_fifo_fault_engine,
+       .fault.reason = gv100_fifo_fault_reason,
+       .fault.hubclient = gv100_fifo_fault_hubclient,
+       .fault.gpcclient = gv100_fifo_fault_gpcclient,
+       .runlist = &tu104_fifo_runlist,
+       .user = {{-1,-1,VOLTA_USERMODE_A       }, tu104_fifo_user_new   },
+       .chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu104_fifo_gpfifo_new },
+       .cgrp_force = true,
+};
+
+int
+tu104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+       return gk104_fifo_new_(&tu104_fifo, device, index, 4096, pfifo);
+}
index ed840921ebe8cf5e7a90dc86903102b78c2ea973..14b0c6bde8ebbdfcef22fdca8a3ae4fc8fcffe21 100644 (file)
@@ -3,4 +3,6 @@
 #include "priv.h"
 int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
                        struct nvkm_object **);
+int tu104_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+                       struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
new file mode 100644 (file)
index 0000000..8f98548
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+                   enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+       struct nvkm_device *device = object->engine->subdev.device;
+       *addr = 0xbb0000 + device->func->resource_addr(device, 0);
+       *size = 0x010000;
+       *type = NVKM_OBJECT_MAP_IO;
+       return 0;
+}
+
+static const struct nvkm_object_func
+tu104_fifo_user = {
+       .map = tu104_fifo_user_map,
+};
+
+int
+tu104_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                   struct nvkm_object **pobject)
+{
+       return nvkm_object_new_(&tu104_fifo_user, oclass, argv, argc, pobject);
+}
index 14be41f24155a737a1eff869f5778380fae803c4..427340153640111c8c1b418000df37ff31a85785 100644 (file)
@@ -197,7 +197,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
        case NVKM_SUBDEV_PMU:
                debug_reg = 0xc08;
                break;
-       case NVKM_ENGINE_NVDEC:
+       case NVKM_ENGINE_NVDEC0:
                debug_reg = 0xd00;
                break;
        case NVKM_ENGINE_SEC2:
index e5830453813d43014daa13c16b85c5f211e4156d..ab0282dc07369e801071b2cae4a5c45b844975e5 100644 (file)
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/bar/gf100.o
 nvkm-y += nvkm/subdev/bar/gk20a.o
 nvkm-y += nvkm/subdev/bar/gm107.o
 nvkm-y += nvkm/subdev/bar/gm20b.o
+nvkm-y += nvkm/subdev/bar/tu104.o
index 243f0a5c8a62530007c7815d226d46b3fb7f0fbd..209a6a40834a0f21d6a341f008337a2730f848bc 100644 (file)
@@ -36,6 +36,16 @@ nvkm_bar_bar1_vmm(struct nvkm_device *device)
        return device->bar->func->bar1.vmm(device->bar);
 }
 
+void
+nvkm_bar_bar1_reset(struct nvkm_device *device)
+{
+       struct nvkm_bar *bar = device->bar;
+       if (bar) {
+               bar->func->bar1.init(bar);
+               bar->func->bar1.wait(bar);
+       }
+}
+
 struct nvkm_vmm *
 nvkm_bar_bar2_vmm(struct nvkm_device *device)
 {
@@ -48,6 +58,16 @@ nvkm_bar_bar2_vmm(struct nvkm_device *device)
        return NULL;
 }
 
+void
+nvkm_bar_bar2_reset(struct nvkm_device *device)
+{
+       struct nvkm_bar *bar = device->bar;
+       if (bar && bar->bar2) {
+               bar->func->bar2.init(bar);
+               bar->func->bar2.wait(bar);
+       }
+}
+
 void
 nvkm_bar_bar2_fini(struct nvkm_device *device)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
new file mode 100644 (file)
index 0000000..ecaead1
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+static void
+tu104_bar_bar2_wait(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0xb80f50) & 0x0000000c))
+                       break;
+       );
+}
+
+static void
+tu104_bar_bar2_fini(struct nvkm_bar *bar)
+{
+       nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar2_init(struct nvkm_bar *base)
+{
+       struct nvkm_device *device = base->subdev.device;
+       struct gf100_bar *bar = gf100_bar(base);
+       u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+       if (bar->bar2_halve)
+               addr |= 0x40000000;
+       nvkm_wr32(device, 0xb80f48, 0x80000000 | addr);
+}
+
+static void
+tu104_bar_bar1_wait(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0xb80f50) & 0x00000003))
+                       break;
+       );
+}
+
+static void
+tu104_bar_bar1_fini(struct nvkm_bar *bar)
+{
+       nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar1_init(struct nvkm_bar *base)
+{
+       struct nvkm_device *device = base->subdev.device;
+       struct gf100_bar *bar = gf100_bar(base);
+       const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+       nvkm_wr32(device, 0xb80f40, 0x80000000 | addr);
+}
+
+static const struct nvkm_bar_func
+tu104_bar = {
+       .dtor = gf100_bar_dtor,
+       .oneinit = gf100_bar_oneinit,
+       .bar1.init = tu104_bar_bar1_init,
+       .bar1.fini = tu104_bar_bar1_fini,
+       .bar1.wait = tu104_bar_bar1_wait,
+       .bar1.vmm = gf100_bar_bar1_vmm,
+       .bar2.init = tu104_bar_bar2_init,
+       .bar2.fini = tu104_bar_bar2_fini,
+       .bar2.wait = tu104_bar_bar2_wait,
+       .bar2.vmm = gf100_bar_bar2_vmm,
+       .flush = g84_bar_flush,
+};
+
+int
+tu104_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+       return gf100_bar_new_(&tu104_bar, device, index, pbar);
+}
index 50a4369264841d57cd268dec05352360184e157f..3ef505a5c01b20a99a695ca7d00bdd05d512fb1f 100644 (file)
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/devinit/gf100.o
 nvkm-y += nvkm/subdev/devinit/gm107.o
 nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
+nvkm-y += nvkm/subdev/devinit/tu104.o
index 17235e940ca9e354226836b4bb9e8582a48e1f90..59940dacc2ba028939d3822c0e736248a5af9359 100644 (file)
@@ -105,6 +105,15 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
        return pmu_exec(init, pmu.init_addr_pmu), 0;
 }
 
+void
+gm200_devinit_preos(struct nv50_devinit *init, bool post)
+{
+       /* Optional: Execute PRE_OS application on PMU, which should at
+        * least take care of fans until a full PMU has been loaded.
+        */
+       pmu_load(init, 0x01, post, NULL, NULL);
+}
+
 int
 gm200_devinit_post(struct nvkm_devinit *base, bool post)
 {
@@ -156,10 +165,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                        return -ETIMEDOUT;
        }
 
-       /* Optional: Execute PRE_OS application on PMU, which should at
-        * least take care of fans until a full PMU has been loaded.
-        */
-       pmu_load(init, 0x01, post, NULL, NULL);
+       gm200_devinit_preos(init, post);
        return 0;
 }
 
index 9b9f0dc1e19288b80200f3f4f4a54f5da849bf4e..72d130bb7f7cd2c4a9a00b1573cf2fbffbdf8d34 100644 (file)
@@ -26,4 +26,5 @@ void gf100_devinit_preinit(struct nvkm_devinit *);
 u64  gm107_devinit_disable(struct nvkm_devinit *);
 
 int gm200_devinit_post(struct nvkm_devinit *, bool);
+void gm200_devinit_preos(struct nv50_devinit *, bool);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
new file mode 100644 (file)
index 0000000..aae87b3
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+       struct nvkm_subdev *subdev = &init->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvbios_pll info;
+       int head = type - PLL_VPLL0;
+       int N, fN, M, P;
+       int ret;
+
+       ret = nvbios_pll_parse(device->bios, type, &info);
+       if (ret)
+               return ret;
+
+       ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+       if (ret < 0)
+               return ret;
+
+       switch (info.type) {
+       case PLL_VPLL0:
+       case PLL_VPLL1:
+       case PLL_VPLL2:
+       case PLL_VPLL3:
+               nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
+               nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
+                                                           (N <<  8) |
+                                                           (M <<  0));
+               /*XXX*/
+               nvkm_wr32(device, 0x00ef0c + (head * 0x40), 0x00000900);
+               nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02000014);
+               break;
+       default:
+               nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+tu104_devinit_post(struct nvkm_devinit *base, bool post)
+{
+       struct nv50_devinit *init = nv50_devinit(base);
+       gm200_devinit_preos(init, post);
+       return 0;
+}
+
+static const struct nvkm_devinit_func
+tu104_devinit = {
+       .init = nv50_devinit_init,
+       .post = tu104_devinit_post,
+       .pll_set = tu104_devinit_pll_set,
+       .disable = gm107_devinit_disable,
+};
+
+int
+tu104_devinit_new(struct nvkm_device *device, int index,
+               struct nvkm_devinit **pinit)
+{
+       return nv50_devinit_new_(&tu104_devinit, device, index, pinit);
+}
index 45bb46fb0929a4638cb354eca51038817f7797ab..794eb1745b2fcc808aaaf035b0c8f0715936eb5e 100644 (file)
@@ -1,3 +1,4 @@
 nvkm-y += nvkm/subdev/fault/base.o
 nvkm-y += nvkm/subdev/fault/gp100.o
 nvkm-y += nvkm/subdev/fault/gv100.o
+nvkm-y += nvkm/subdev/fault/tu104.o
index 16ad91c91a7beca11a8febfbb91a54f3b34ac008..4ba1e21e8fdac6a557489c7bcadcf1ceee780527 100644 (file)
 
 #include <core/memory.h>
 #include <core/notify.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
 
 static void
 nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
 {
        struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
-       fault->func->buffer.fini(fault->buffer[index]);
+       fault->func->buffer.intr(fault->buffer[index], false);
 }
 
 static void
 nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
 {
        struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
-       fault->func->buffer.init(fault->buffer[index]);
+       fault->func->buffer.intr(fault->buffer[index], true);
 }
 
 static int
@@ -91,7 +89,6 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
 {
        struct nvkm_subdev *subdev = &fault->subdev;
        struct nvkm_device *device = subdev->device;
-       struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
        struct nvkm_fault_buffer *buffer;
        int ret;
 
@@ -99,7 +96,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
                return -ENOMEM;
        buffer->fault = fault;
        buffer->id = id;
-       buffer->entries = fault->func->buffer.entries(buffer);
+       fault->func->buffer.info(buffer);
        fault->buffer[id] = buffer;
 
        nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
@@ -110,12 +107,12 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
        if (ret)
                return ret;
 
-       ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
-                          &buffer->vma);
-       if (ret)
-               return ret;
+       /* Pin fault buffer in BAR2. */
+       buffer->addr = nvkm_memory_bar2(buffer->mem);
+       if (buffer->addr == ~0ULL)
+               return -EFAULT;
 
-       return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
+       return 0;
 }
 
 static int
@@ -146,7 +143,6 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
 static void *
 nvkm_fault_dtor(struct nvkm_subdev *subdev)
 {
-       struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
        struct nvkm_fault *fault = nvkm_fault(subdev);
        int i;
 
@@ -154,7 +150,6 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
 
        for (i = 0; i < fault->buffer_nr; i++) {
                if (fault->buffer[i]) {
-                       nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
                        nvkm_memory_unref(&fault->buffer[i]->mem);
                        kfree(fault->buffer[i]);
                }
index 5e71db2e8d750378a6746438949d23c4ae4f6676..8fb96fe614f9dead4075bc233fdd5c9bd9750491 100644 (file)
  */
 #include "priv.h"
 
-#include <subdev/mmu.h>
+#include <subdev/mc.h>
+
+static void
+gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+       struct nvkm_device *device = buffer->fault->subdev.device;
+       nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+}
 
 static void
 gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
@@ -34,15 +41,17 @@ static void
 gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
        struct nvkm_device *device = buffer->fault->subdev.device;
-       nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->vma->addr));
-       nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->vma->addr));
+       nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
+       nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
        nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
 }
 
-static u32
-gp100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
-       return nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+       buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+       buffer->get = 0x002a7c;
+       buffer->put = 0x002a80;
 }
 
 static void
@@ -56,9 +65,10 @@ gp100_fault = {
        .intr = gp100_fault_intr,
        .buffer.nr = 1,
        .buffer.entry_size = 32,
-       .buffer.entries = gp100_fault_buffer_entries,
+       .buffer.info = gp100_fault_buffer_info,
        .buffer.init = gp100_fault_buffer_init,
        .buffer.fini = gp100_fault_buffer_fini,
+       .buffer.intr = gp100_fault_buffer_intr,
 };
 
 int
index 3cd610d7deb5268f1e73fcebbd4e49aa50519cfc..6fc54e17c9354d17f0dba41f2915b0bd72f70728 100644 (file)
@@ -30,9 +30,8 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 {
        struct nvkm_device *device = buffer->fault->subdev.device;
        struct nvkm_memory *mem = buffer->mem;
-       const u32 foff = buffer->id * 0x14;
-       u32 get = nvkm_rd32(device, 0x100e2c + foff);
-       u32 put = nvkm_rd32(device, 0x100e30 + foff);
+       u32 get = nvkm_rd32(device, buffer->get);
+       u32 put = nvkm_rd32(device, buffer->put);
        if (put == get)
                return;
 
@@ -51,7 +50,7 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 
                if (++get == buffer->entries)
                        get = 0;
-               nvkm_wr32(device, 0x100e2c + foff, get);
+               nvkm_wr32(device, buffer->get, get);
 
                info.addr   = ((u64)addrhi << 32) | addrlo;
                info.inst   = ((u64)insthi << 32) | instlo;
@@ -70,13 +69,21 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
 }
 
 static void
-gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
 {
        struct nvkm_device *device = buffer->fault->subdev.device;
        const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
-       const u32 foff = buffer->id * 0x14;
+       if (enable)
+               nvkm_mask(device, 0x100a2c, intr, intr);
+       else
+               nvkm_mask(device, 0x100a34, intr, intr);
+}
 
-       nvkm_mask(device, 0x100a34, intr, intr);
+static void
+gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+       struct nvkm_device *device = buffer->fault->subdev.device;
+       const u32 foff = buffer->id * 0x14;
        nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
 }
 
@@ -84,23 +91,25 @@ static void
 gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
 {
        struct nvkm_device *device = buffer->fault->subdev.device;
-       const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
        const u32 foff = buffer->id * 0x14;
 
        nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
-       nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
-       nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
+       nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
+       nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
        nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
-       nvkm_mask(device, 0x100a2c, intr, intr);
 }
 
-static u32
-gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 {
        struct nvkm_device *device = buffer->fault->subdev.device;
        const u32 foff = buffer->id * 0x14;
+
        nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
-       return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+
+       buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+       buffer->get = 0x100e2c + foff;
+       buffer->put = 0x100e30 + foff;
 }
 
 static int
@@ -166,6 +175,8 @@ static void
 gv100_fault_fini(struct nvkm_fault *fault)
 {
        nvkm_notify_put(&fault->nrpfb);
+       if (fault->buffer[0])
+               fault->func->buffer.fini(fault->buffer[0]);
        nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
 }
 
@@ -173,14 +184,15 @@ static void
 gv100_fault_init(struct nvkm_fault *fault)
 {
        nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
+       fault->func->buffer.init(fault->buffer[0]);
        nvkm_notify_get(&fault->nrpfb);
 }
 
-static int
+int
 gv100_fault_oneinit(struct nvkm_fault *fault)
 {
        return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
-                               gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+                               gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
                                &fault->nrpfb);
 }
 
@@ -192,9 +204,10 @@ gv100_fault = {
        .intr = gv100_fault_intr,
        .buffer.nr = 2,
        .buffer.entry_size = 32,
-       .buffer.entries = gv100_fault_buffer_entries,
+       .buffer.info = gv100_fault_buffer_info,
        .buffer.init = gv100_fault_buffer_init,
        .buffer.fini = gv100_fault_buffer_fini,
+       .buffer.intr = gv100_fault_buffer_intr,
 };
 
 int
index e4d2f5234fd19be82125e7266bf89e3db1e4f563..8ca8b2876dadf30b1b6eb16c2b1acdff8b091624 100644 (file)
@@ -12,8 +12,10 @@ struct nvkm_fault_buffer {
        struct nvkm_fault *fault;
        int id;
        int entries;
+       u32 get;
+       u32 put;
        struct nvkm_memory *mem;
-       struct nvkm_vma *vma;
+       u64 addr;
 };
 
 int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
@@ -27,9 +29,12 @@ struct nvkm_fault_func {
        struct {
                int nr;
                u32 entry_size;
-               u32 (*entries)(struct nvkm_fault_buffer *);
+               void (*info)(struct nvkm_fault_buffer *);
                void (*init)(struct nvkm_fault_buffer *);
                void (*fini)(struct nvkm_fault_buffer *);
+               void (*intr)(struct nvkm_fault_buffer *, bool enable);
        } buffer;
 };
+
+int gv100_fault_oneinit(struct nvkm_fault *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
new file mode 100644 (file)
index 0000000..9c8a3ad
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+       /*XXX: Earlier versions of RM touched the old regs on Turing,
+        *     which don't appear to actually work anymore, but newer
+        *     versions of RM don't appear to touch anything at all..
+        */
+}
+
+static void
+tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+       struct nvkm_device *device = buffer->fault->subdev.device;
+       const u32 foff = buffer->id * 0x20;
+       nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+{
+       struct nvkm_device *device = buffer->fault->subdev.device;
+       const u32 foff = buffer->id * 0x20;
+
+       nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
+       nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
+       nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
+       nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x80000000);
+}
+
+static void
+tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
+{
+       struct nvkm_device *device = buffer->fault->subdev.device;
+       const u32 foff = buffer->id * 0x20;
+
+       nvkm_mask(device, 0xb83010 + foff, 0x40000000, 0x40000000);
+
+       buffer->entries = nvkm_rd32(device, 0xb83010 + foff) & 0x000fffff;
+       buffer->get = 0xb83008 + foff;
+       buffer->put = 0xb8300c + foff;
+}
+
+static void
+tu104_fault_intr_fault(struct nvkm_fault *fault)
+{
+       struct nvkm_subdev *subdev = &fault->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_fault_data info;
+       const u32 addrlo = nvkm_rd32(device, 0xb83080);
+       const u32 addrhi = nvkm_rd32(device, 0xb83084);
+       const u32  info0 = nvkm_rd32(device, 0xb83088);
+       const u32 insthi = nvkm_rd32(device, 0xb8308c);
+       const u32  info1 = nvkm_rd32(device, 0xb83090);
+
+       info.addr = ((u64)addrhi << 32) | addrlo;
+       info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
+       info.time = 0;
+       info.engine = (info0 & 0x000000ff);
+       info.valid  = (info1 & 0x80000000) >> 31;
+       info.gpc    = (info1 & 0x1f000000) >> 24;
+       info.hub    = (info1 & 0x00100000) >> 20;
+       info.access = (info1 & 0x000f0000) >> 16;
+       info.client = (info1 & 0x00007f00) >> 8;
+       info.reason = (info1 & 0x0000001f);
+
+       nvkm_fifo_fault(device->fifo, &info);
+}
+
+static void
+tu104_fault_intr(struct nvkm_fault *fault)
+{
+       struct nvkm_subdev *subdev = &fault->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 stat = nvkm_rd32(device, 0xb83094);
+
+       if (stat & 0x80000000) {
+               tu104_fault_intr_fault(fault);
+               nvkm_wr32(device, 0xb83094, 0x80000000);
+               stat &= ~0x80000000;
+       }
+
+       if (stat & 0x00000200) {
+               if (fault->buffer[0]) {
+                       nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+                       stat &= ~0x00000200;
+               }
+       }
+
+       /*XXX: guess, can't confirm until we get fw... */
+       if (stat & 0x00000100) {
+               if (fault->buffer[1]) {
+                       nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+                       stat &= ~0x00000100;
+               }
+       }
+
+       if (stat) {
+               nvkm_debug(subdev, "intr %08x\n", stat);
+       }
+}
+
+static void
+tu104_fault_fini(struct nvkm_fault *fault)
+{
+       nvkm_notify_put(&fault->nrpfb);
+       if (fault->buffer[0])
+               fault->func->buffer.fini(fault->buffer[0]);
+       /*XXX: disable priv faults */
+}
+
+static void
+tu104_fault_init(struct nvkm_fault *fault)
+{
+       /*XXX: enable priv faults */
+       fault->func->buffer.init(fault->buffer[0]);
+       nvkm_notify_get(&fault->nrpfb);
+}
+
+static const struct nvkm_fault_func
+tu104_fault = {
+       .oneinit = gv100_fault_oneinit,
+       .init = tu104_fault_init,
+       .fini = tu104_fault_fini,
+       .intr = tu104_fault_intr,
+       .buffer.nr = 2,
+       .buffer.entry_size = 32,
+       .buffer.info = tu104_fault_buffer_info,
+       .buffer.init = tu104_fault_buffer_init,
+       .buffer.fini = tu104_fault_buffer_fini,
+       .buffer.intr = tu104_fault_buffer_intr,
+};
+
+int
+tu104_fault_new(struct nvkm_device *device, int index,
+               struct nvkm_fault **pfault)
+{
+       return nvkm_fault_new_(&tu104_fault, device, index, pfault);
+}
index 434d2fc5bb1ce90c92c16299208ba5a290cbbe06..b2bb5a3ccb02b34a7158eee49d7fec2aa5559626 100644 (file)
@@ -68,10 +68,13 @@ nvkm_fb_bios_memtype(struct nvkm_bios *bios)
 
        if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
                switch (M0203E.type) {
-               case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
-               case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
-               case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
-               case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
+               case M0203E_TYPE_DDR2  : return NVKM_RAM_TYPE_DDR2;
+               case M0203E_TYPE_DDR3  : return NVKM_RAM_TYPE_DDR3;
+               case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
+               case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
+               case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
+               case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
+               case M0203E_TYPE_HBM2  : return NVKM_RAM_TYPE_HBM2;
                default:
                        nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
                        return NVKM_RAM_TYPE_UNKNOWN;
index 24c7bd50573169054047634ffc98f546fa2f2a2a..b11867f682cb933fe60996092e29357e4aab8154 100644 (file)
@@ -184,6 +184,9 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
                [NVKM_RAM_TYPE_GDDR3  ] = "GDDR3",
                [NVKM_RAM_TYPE_GDDR4  ] = "GDDR4",
                [NVKM_RAM_TYPE_GDDR5  ] = "GDDR5",
+               [NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
+               [NVKM_RAM_TYPE_GDDR6  ] = "GDDR6",
+               [NVKM_RAM_TYPE_HBM2   ] = "HBM2",
        };
        struct nvkm_subdev *subdev = &fb->subdev;
        int ret;
index db48a1daca0c7a3d786332ce25435839fcc10760..02c4eb28cef44db11ef989f34cab124b74d4f84b 100644 (file)
@@ -288,6 +288,19 @@ nv50_instobj_addr(struct nvkm_memory *memory)
        return nvkm_memory_addr(nv50_instobj(memory)->ram);
 }
 
+static u64
+nv50_instobj_bar2(struct nvkm_memory *memory)
+{
+       struct nv50_instobj *iobj = nv50_instobj(memory);
+       u64 addr = ~0ULL;
+       if (nv50_instobj_acquire(&iobj->base.memory)) {
+               iobj->lru.next = NULL; /* Exclude from eviction. */
+               addr = iobj->bar->addr;
+       }
+       nv50_instobj_release(&iobj->base.memory);
+       return addr;
+}
+
 static enum nvkm_memory_target
 nv50_instobj_target(struct nvkm_memory *memory)
 {
@@ -325,8 +338,9 @@ static const struct nvkm_memory_func
 nv50_instobj_func = {
        .dtor = nv50_instobj_dtor,
        .target = nv50_instobj_target,
-       .size = nv50_instobj_size,
+       .bar2 = nv50_instobj_bar2,
        .addr = nv50_instobj_addr,
+       .size = nv50_instobj_size,
        .boot = nv50_instobj_boot,
        .acquire = nv50_instobj_acquire,
        .release = nv50_instobj_release,
index 2befbe36dc28dcf02757dd5f0de718f392046167..f3b06329c338bf43223d6f05ef278e25ce7ef01f 100644 (file)
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/mc/gk104.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
 nvkm-y += nvkm/subdev/mc/gp100.o
 nvkm-y += nvkm/subdev/mc/gp10b.o
+nvkm-y += nvkm/subdev/mc/tu104.o
index 09f669ac663090ecc7615cf3dbb504915fcc3323..0e57ab2a709f4376eb0a96c324ea25f2ba2ae1c4 100644 (file)
@@ -108,6 +108,9 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
        if (stat)
                nvkm_error(&mc->subdev, "intr %08x\n", stat);
        *handled = intr != 0;
+
+       if (mc->func->intr_hack)
+               mc->func->intr_hack(mc, handled);
 }
 
 static u32
index d9e3691d45b7976bf4b5ef9782aea8f7e685487c..eb91a4cf452bd7d6a342bd93a44e17f08a9f7ff6 100644 (file)
@@ -26,6 +26,7 @@ struct nvkm_mc_func {
        void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
        /* retrieve pending interrupt mask (NV_PMC_INTR) */
        u32 (*intr_stat)(struct nvkm_mc *);
+       void (*intr_hack)(struct nvkm_mc *, bool *handled);
        const struct nvkm_mc_map *reset;
        void (*unk260)(struct nvkm_mc *, u32);
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
new file mode 100644 (file)
index 0000000..b7165bd
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+{
+       struct nvkm_device *device = mc->subdev.device;
+       u32 stat = nvkm_rd32(device, 0xb81010);
+       if (stat & 0x00000050) {
+               struct nvkm_subdev *subdev =
+                       nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
+               nvkm_wr32(device, 0xb81010, stat & 0x00000050);
+               if (subdev)
+                       nvkm_subdev_intr(subdev);
+               *handled = true;
+       }
+}
+
+static const struct nvkm_mc_func
+tu104_mc = {
+       .init = nv50_mc_init,
+       .intr = gp100_mc_intr,
+       .intr_unarm = gp100_mc_intr_unarm,
+       .intr_rearm = gp100_mc_intr_rearm,
+       .intr_mask = gp100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
+       .intr_hack = tu104_mc_intr_hack,
+       .reset = gk104_mc_reset,
+};
+
+int
+tu104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+       return gp100_mc_new_(&tu104_mc, device, index, pmc);
+}
index 58a24e3a05985d0ef01e6c72fc4fed79e86d4b3e..8966180b36ccd62f0170a3750a153506026242b5 100644 (file)
@@ -13,6 +13,7 @@ nvkm-y += nvkm/subdev/mmu/gm20b.o
 nvkm-y += nvkm/subdev/mmu/gp100.o
 nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/gv100.o
+nvkm-y += nvkm/subdev/mmu/tu104.o
 
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -33,6 +34,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
 nvkm-y += nvkm/subdev/mmu/vmmgp100.o
 nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
 nvkm-y += nvkm/subdev/mmu/vmmgv100.o
+nvkm-y += nvkm/subdev/mmu/vmmtu104.o
 
 nvkm-y += nvkm/subdev/mmu/umem.o
 nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
new file mode 100644 (file)
index 0000000..8e6f409
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+tu104_mmu = {
+       .dma_bits = 47,
+       .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+       .mem = {{ -1,  0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+       .vmm = {{ -1,  0, NVIF_CLASS_VMM_GP100}, tu104_vmm_new },
+       .kind = gm200_mmu_kind,
+       .kind_sys = true,
+};
+
+int
+tu104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+       return nvkm_mmu_new_(&tu104_mmu, device, index, pmmu);
+}
index 37b201b95f15bcab36374fdc4b7478e5a324e118..6889076097ecaf3f97686a7a570bd1d170d09042 100644 (file)
@@ -134,23 +134,10 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
                        goto fail;
                }
 
-               if (vma->addr != addr) {
-                       const u64 tail = vma->size + vma->addr - addr;
-                       if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
-                               goto fail;
-                       vma->part = true;
-                       nvkm_vmm_node_insert(vmm, vma);
-               }
-
-               if (vma->size != size) {
-                       const u64 tail = vma->size - size;
-                       struct nvkm_vma *tmp;
-                       if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
-                               nvkm_vmm_unmap_region(vmm, vma);
-                               goto fail;
-                       }
-                       tmp->part = true;
-                       nvkm_vmm_node_insert(vmm, tmp);
+               vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+               if (!vma) {
+                       ret = -ENOMEM;
+                       goto fail;
                }
        }
        vma->busy = true;
index 7459def78d504f006a2f7f0b625ba3372238b7ec..6b87fff014b3c5b672308f7467d02fb0ace418c5 100644 (file)
@@ -767,6 +767,20 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
        return new;
 }
 
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+       rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+       nvkm_vmm_free_remove(vmm, vma);
+       list_del(&vma->head);
+       kfree(vma);
+}
+
 static void
 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
@@ -795,7 +809,21 @@ nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        rb_insert_color(&vma->tree, &vmm->free);
 }
 
-void
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+       rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+       nvkm_vmm_node_remove(vmm, vma);
+       list_del(&vma->head);
+       kfree(vma);
+}
+
+static void
 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
        struct rb_node **ptr = &vmm->root.rb_node;
@@ -834,6 +862,78 @@ nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
        return NULL;
 }
 
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL :             \
+       list_entry((root)->head.dir, struct nvkm_vma, head))
+
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+                   struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+       if (next) {
+               if (vma->size == size) {
+                       vma->size += next->size;
+                       nvkm_vmm_node_delete(vmm, next);
+                       if (prev) {
+                               prev->size += vma->size;
+                               nvkm_vmm_node_delete(vmm, vma);
+                               return prev;
+                       }
+                       return vma;
+               }
+               BUG_ON(prev);
+
+               nvkm_vmm_node_remove(vmm, next);
+               vma->size -= size;
+               next->addr -= size;
+               next->size += size;
+               nvkm_vmm_node_insert(vmm, next);
+               return next;
+       }
+
+       if (prev) {
+               if (vma->size != size) {
+                       nvkm_vmm_node_remove(vmm, vma);
+                       prev->size += size;
+                       vma->addr += size;
+                       vma->size -= size;
+                       nvkm_vmm_node_insert(vmm, vma);
+               } else {
+                       prev->size += vma->size;
+                       nvkm_vmm_node_delete(vmm, vma);
+               }
+               return prev;
+       }
+
+       return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+                   struct nvkm_vma *vma, u64 addr, u64 size)
+{
+       struct nvkm_vma *prev = NULL;
+
+       if (vma->addr != addr) {
+               prev = vma;
+               if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+                       return NULL;
+               vma->part = true;
+               nvkm_vmm_node_insert(vmm, vma);
+       }
+
+       if (vma->size != size) {
+               struct nvkm_vma *tmp;
+               if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+                       nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+                       return NULL;
+               }
+               tmp->part = true;
+               nvkm_vmm_node_insert(vmm, tmp);
+       }
+
+       return vma;
+}
+
 static void
 nvkm_vmm_dtor(struct nvkm_vmm *vmm)
 {
@@ -954,37 +1054,20 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
        return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
 }
 
-#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL :              \
-       list_entry((root)->head.dir, struct nvkm_vma, head)
-
 void
 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
-       struct nvkm_vma *next;
+       struct nvkm_vma *next = node(vma, next);
+       struct nvkm_vma *prev = NULL;
 
        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
        nvkm_memory_unref(&vma->memory);
 
-       if (vma->part) {
-               struct nvkm_vma *prev = node(vma, prev);
-               if (!prev->memory) {
-                       prev->size += vma->size;
-                       rb_erase(&vma->tree, &vmm->root);
-                       list_del(&vma->head);
-                       kfree(vma);
-                       vma = prev;
-               }
-       }
-
-       next = node(vma, next);
-       if (next && next->part) {
-               if (!next->memory) {
-                       vma->size += next->size;
-                       rb_erase(&next->tree, &vmm->root);
-                       list_del(&next->head);
-                       kfree(next);
-               }
-       }
+       if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+               prev = NULL;
+       if (!next->part || next->memory)
+               next = NULL;
+       nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
 
 void
@@ -1163,18 +1246,14 @@ nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        struct nvkm_vma *prev, *next;
 
        if ((prev = node(vma, prev)) && !prev->used) {
-               rb_erase(&prev->tree, &vmm->free);
-               list_del(&prev->head);
                vma->addr  = prev->addr;
                vma->size += prev->size;
-               kfree(prev);
+               nvkm_vmm_free_delete(vmm, prev);
        }
 
        if ((next = node(vma, next)) && !next->used) {
-               rb_erase(&next->tree, &vmm->free);
-               list_del(&next->head);
                vma->size += next->size;
-               kfree(next);
+               nvkm_vmm_free_delete(vmm, next);
        }
 
        nvkm_vmm_free_insert(vmm, vma);
@@ -1250,7 +1329,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
        }
 
        /* Remove VMA from the list of allocated nodes. */
-       rb_erase(&vma->tree, &vmm->root);
+       nvkm_vmm_node_remove(vmm, vma);
 
        /* Merge VMA back into the free list. */
        vma->page = NVKM_VMA_PAGE_NONE;
@@ -1357,7 +1436,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
                        tail = ALIGN_DOWN(tail, vmm->func->page_block);
 
                if (addr <= tail && tail - addr >= size) {
-                       rb_erase(&this->tree, &vmm->free);
+                       nvkm_vmm_free_remove(vmm, this);
                        vma = this;
                        break;
                }
index 1a3b0a3724ca76ec697f8ab9948bb854847f2b2d..42ad326521a3391a6bd6442eb4a2630842002fd1 100644 (file)
@@ -157,6 +157,8 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
                  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
                  const char *name, struct nvkm_vmm *);
 struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+                                    u64 addr, u64 size);
 int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
                        bool sparse, u8 page, u8 align, u64 size,
                        struct nvkm_vma **pvma);
@@ -165,7 +167,6 @@ void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
 void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
 
 struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
-void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
 
 int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
                  u64, u64, void *, u32, struct lock_class_key *,
@@ -200,6 +201,8 @@ int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
 int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
 void gp100_vmm_flush(struct nvkm_vmm *, int);
 
+int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
 int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                 struct lock_class_key *, const char *, struct nvkm_vmm **);
 int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
@@ -239,6 +242,9 @@ int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
 int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
                  struct lock_class_key *, const char *,
                  struct nvkm_vmm **);
+int tu104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+                 struct lock_class_key *, const char *,
+                 struct nvkm_vmm **);
 
 #define VMM_PRINT(l,v,p,f,a...) do {                                           \
        struct nvkm_vmm *_vmm = (v);                                           \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
new file mode 100644 (file)
index 0000000..adaadd9
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+       struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 type = depth << 24; /*XXX: not confirmed */
+
+       type = 0x00000001; /* PAGE_ALL */
+       if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+               type |= 0x00000004; /* HUB_ONLY */
+
+       mutex_lock(&subdev->mutex);
+
+       nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+       nvkm_wr32(device, 0xb830a4, 0x00000000);
+       nvkm_wr32(device, 0x100e68, 0x00000000);
+       nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
+
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0xb830b0) & 0x80000000))
+                       break;
+       );
+
+       mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+tu104_vmm = {
+       .join = gv100_vmm_join,
+       .part = gf100_vmm_part,
+       .aper = gf100_vmm_aper,
+       .valid = gp100_vmm_valid,
+       .flush = tu104_vmm_flush,
+       .page = {
+               { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+               { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+               { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+               { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+               { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+               { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+               {}
+       }
+};
+
+int
+tu104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
+             void *argv, u32 argc, struct lock_class_key *key,
+             const char *name, struct nvkm_vmm **pvmm)
+{
+       return nv04_vmm_new_(&tu104_vmm, mmu, 0, addr, size,
+                            argv, argc, key, name, pvmm);
+}
index 1f7a3c1a7f5061b74e89febd1213b493bacab9b6..84a2f243ed9bd16a59bf752f1f01602c72519c03 100644 (file)
@@ -59,10 +59,10 @@ gp102_run_secure_scrub(struct nvkm_secboot *sb)
 
        nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
 
-       engine = nvkm_engine_ref(&device->nvdec->engine);
+       engine = nvkm_engine_ref(&device->nvdec[0]->engine);
        if (IS_ERR(engine))
                return PTR_ERR(engine);
-       falcon = device->nvdec->falcon;
+       falcon = device->nvdec[0]->falcon;
 
        nvkm_falcon_get(falcon, &sb->subdev);
 
index 36de23d12ae460e336d1f4917434881afe26a5a5..dd922033628c236cfec38f3e91d49aa9ba886e78 100644 (file)
  */
 #include "priv.h"
 
+s64
+nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
+{
+       struct nvkm_subdev *subdev = &wait->tmr->subdev;
+       u64 time = nvkm_timer_read(wait->tmr);
+
+       if (wait->reads == 0) {
+               wait->time0 = time;
+               wait->time1 = time;
+       }
+
+       if (wait->time1 == time) {
+               if (wait->reads++ == 16) {
+                       nvkm_fatal(subdev, "stalled at %016llx\n", time);
+                       return -ETIMEDOUT;
+               }
+       } else {
+               wait->time1 = time;
+               wait->reads = 1;
+       }
+
+       if (wait->time1 - wait->time0 > wait->limit)
+               return -ETIMEDOUT;
+
+       return wait->time1 - wait->time0;
+}
+
+void
+nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
+                    struct nvkm_timer_wait *wait)
+{
+       wait->tmr = device->timer;
+       wait->limit = nsec;
+       wait->reads = 0;
+}
+
 u64
 nvkm_timer_read(struct nvkm_timer *tmr)
 {
index 4f1f3e890650601a4514e2aaa84805d346bb8d1b..39081eadfd84cd29422c612ae715593bb8940b0c 100644 (file)
@@ -86,7 +86,7 @@ gk104_top_oneinit(struct nvkm_top *top)
                case 0x0000000d: A_(SEC2  ); break;
                case 0x0000000e: B_(NVENC ); break;
                case 0x0000000f: A_(NVENC1); break;
-               case 0x00000010: A_(NVDEC ); break;
+               case 0x00000010: B_(NVDEC ); break;
                case 0x00000013: B_(CE    ); break;
                        break;
                default:
index 0a693fede05ebf218bb9bd8a077be9178cf56e7d..30f85f0130cbb172ef9292097acb3c56c0adcbb7 100644 (file)
@@ -217,7 +217,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
-       entry->tv.shared = false;
+       entry->tv.num_shared = 0;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
 }
index 1ae31dbc61c64a2fbf30ede3a84fa0ea315fe8b5..f43305329939856bbfad5837cca510e3c8c5450b 100644 (file)
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                }
 
                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
-               p->relocs[i].tv.shared = !r->write_domain;
+               p->relocs[i].tv.num_shared = !r->write_domain;
 
                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 
                resv = reloc->robj->tbo.resv;
                r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-                                    reloc->tv.shared);
+                                    reloc->tv.num_shared);
                if (r)
                        return r;
        }
index 27d8e7dd2d0676c4f369041be3bcb5b95774a1b3..44617dec8183373c5bd278502d54337f93b4ad77 100644 (file)
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
        INIT_LIST_HEAD(&list);
 
        tv.bo = &bo_va->bo->tbo;
-       tv.shared = true;
+       tv.num_shared = 1;
        list_add(&tv.head, &list);
 
        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
index a3d2ca07a058d11d5133801c7c51e3691aa2dc92..0d374211661c23107baee8cf6c0e643acd22a823 100644 (file)
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
        list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
-       list[0].tv.shared = true;
+       list[0].tv.num_shared = 1;
        list[0].tiling_flags = 0;
        list_add(&list[0].tv.head, head);
 
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
                list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].robj->tbo;
-               list[idx].tv.shared = true;
+               list[idx].tv.num_shared = 1;
                list[idx].tiling_flags = 0;
                list_add(&list[idx++].tv.head, head);
        }
index 9d4cd196037a7abfda0268363460691ccc5b45fd..dbb69063b3d5e21ee7f3b696cd54d198e7ee92be 100644 (file)
@@ -211,6 +211,62 @@ void drm_sched_fault(struct drm_gpu_scheduler *sched)
 }
 EXPORT_SYMBOL(drm_sched_fault);
 
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrary large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the timeout remaining
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+       unsigned long sched_timeout, now = jiffies;
+
+       sched_timeout = sched->work_tdr.timer.expires;
+
+       /*
+        * Modify the timeout to an arbitrarily large value. This also prevents
+        * the timeout to be restarted when new submissions arrive
+        */
+       if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+                       && time_after(sched_timeout, now))
+               return sched_timeout - now;
+       else
+               return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+               unsigned long remaining)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched->job_list_lock, flags);
+
+       if (list_empty(&sched->ring_mirror_list))
+               cancel_delayed_work(&sched->work_tdr);
+       else
+               mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+       spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -218,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
        struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
                                                   finish_work);
        struct drm_gpu_scheduler *sched = s_job->sched;
+       unsigned long flags;
 
        /*
         * Canceling the timeout without removing our job from the ring mirror
@@ -228,12 +285,12 @@ static void drm_sched_job_finish(struct work_struct *work)
         */
        cancel_delayed_work_sync(&sched->work_tdr);
 
-       spin_lock(&sched->job_list_lock);
+       spin_lock_irqsave(&sched->job_list_lock, flags);
        /* remove job from ring_mirror_list */
        list_del_init(&s_job->node);
        /* queue TDR for next job */
        drm_sched_start_timeout(sched);
-       spin_unlock(&sched->job_list_lock);
+       spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
        sched->ops->free_job(s_job);
 }
@@ -249,20 +306,22 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
 static void drm_sched_job_begin(struct drm_sched_job *s_job)
 {
        struct drm_gpu_scheduler *sched = s_job->sched;
+       unsigned long flags;
 
        dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
                               drm_sched_job_finish_cb);
 
-       spin_lock(&sched->job_list_lock);
+       spin_lock_irqsave(&sched->job_list_lock, flags);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        drm_sched_start_timeout(sched);
-       spin_unlock(&sched->job_list_lock);
+       spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 
 static void drm_sched_job_timedout(struct work_struct *work)
 {
        struct drm_gpu_scheduler *sched;
        struct drm_sched_job *job;
+       unsigned long flags;
 
        sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
        job = list_first_entry_or_null(&sched->ring_mirror_list,
@@ -271,9 +330,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
        if (job)
                job->sched->ops->timedout_job(job);
 
-       spin_lock(&sched->job_list_lock);
+       spin_lock_irqsave(&sched->job_list_lock, flags);
        drm_sched_start_timeout(sched);
-       spin_unlock(&sched->job_list_lock);
+       spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 
 /**
@@ -287,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
 {
        struct drm_sched_job *s_job;
        struct drm_sched_entity *entity, *tmp;
+       unsigned long flags;
        int i;
 
-       spin_lock(&sched->job_list_lock);
+       spin_lock_irqsave(&sched->job_list_lock, flags);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
@@ -299,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
                        atomic_dec(&sched->hw_rq_count);
                }
        }
-       spin_unlock(&sched->job_list_lock);
+       spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
        if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
                atomic_inc(&bad->karma);
@@ -337,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 {
        struct drm_sched_job *s_job, *tmp;
        bool found_guilty = false;
+       unsigned long flags;
        int r;
 
-       spin_lock(&sched->job_list_lock);
+       spin_lock_irqsave(&sched->job_list_lock, flags);
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;
@@ -353,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
                if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
-               spin_unlock(&sched->job_list_lock);
+               spin_unlock_irqrestore(&sched->job_list_lock, flags);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
 
@@ -372,10 +433,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
                                drm_sched_expel_job_unlocked(s_job);
                        drm_sched_process_job(NULL, &s_fence->cb);
                }
-               spin_lock(&sched->job_list_lock);
+               spin_lock_irqsave(&sched->job_list_lock, flags);
        }
        drm_sched_start_timeout(sched);
-       spin_unlock(&sched->job_list_lock);
+       spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
@@ -612,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                   long timeout,
                   const char *name)
 {
-       int i;
+       int i, ret;
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
@@ -633,8 +694,10 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
        /* Each scheduler will run on a seperate kernel thread */
        sched->thread = kthread_run(drm_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
+               ret = PTR_ERR(sched->thread);
+               sched->thread = NULL;
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
-               return PTR_ERR(sched->thread);
+               return ret;
        }
 
        sched->ready = true;
index a2f753205a3ef0fabab47a3ac0c43e9b1d408ddf..9d2bcdf8bc29e9775ef4f6553dc85e2b660f7237 100644 (file)
@@ -53,7 +53,7 @@ static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
        int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
 
        if (x1 >= x2 || y1 >= y2) {
-               pr_err("Cannot have damage clip with no dimention.\n");
+               pr_err("Cannot have damage clip with no dimension.\n");
                return false;
        }
 
index f80e82e164759457ae02f85ecb7354930c0f344f..607a6ea17ecc534b0f8433b63fff0383f1bde190 100644 (file)
@@ -1978,6 +1978,23 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
+{
+       unsigned int i;
+
+       if (!dc->soc->wgrps)
+               return true;
+
+       for (i = 0; i < dc->soc->num_wgrps; i++) {
+               const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+               if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
+                       return true;
+       }
+
+       return false;
+}
+
 static int tegra_dc_init(struct host1x_client *client)
 {
        struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1993,22 +2010,8 @@ static int tegra_dc_init(struct host1x_client *client)
         * assign a primary plane to them, which in turn will cause KMS to
         * crash.
         */
-       if (dc->soc->wgrps) {
-               bool has_wgrps = false;
-               unsigned int i;
-
-               for (i = 0; i < dc->soc->num_wgrps; i++) {
-                       const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
-
-                       if (wgrp->dc == dc->pipe && wgrp->num_windows > 0) {
-                               has_wgrps = true;
-                               break;
-                       }
-               }
-
-               if (!has_wgrps)
-                       return 0;
-       }
+       if (!tegra_dc_has_window_groups(dc))
+               return 0;
 
        dc->syncpt = host1x_syncpt_request(client, flags);
        if (!dc->syncpt)
@@ -2094,6 +2097,9 @@ static int tegra_dc_exit(struct host1x_client *client)
        struct tegra_dc *dc = host1x_client_to_dc(client);
        int err;
 
+       if (!tegra_dc_has_window_groups(dc))
+               return 0;
+
        devm_free_irq(dc->dev, dc->irq, dc);
 
        err = tegra_dc_rgb_exit(dc);
index 65ea4988b332e2d121ddd482c79d70f7ecc57ac5..4b70ce664c4185e36f8e173dfd43353eee404fa5 100644 (file)
@@ -1274,6 +1274,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
        { .compatible = "nvidia,tegra194-display", },
        { .compatible = "nvidia,tegra194-dc", },
        { .compatible = "nvidia,tegra194-sor", },
+       { .compatible = "nvidia,tegra194-vic", },
        { /* sentinel */ }
 };
 
index f685e72949d16b59c74cff8a230a08871eff747c..352d05feabb09507db689f8106fcef00705c76c3 100644 (file)
@@ -141,9 +141,9 @@ int falcon_load_firmware(struct falcon *falcon)
        /* allocate iova space for the firmware */
        falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
                                                    &falcon->firmware.paddr);
-       if (!falcon->firmware.vaddr) {
-               dev_err(falcon->dev, "dma memory mapping failed\n");
-               return -ENOMEM;
+       if (IS_ERR(falcon->firmware.vaddr)) {
+               dev_err(falcon->dev, "DMA memory mapping failed\n");
+               return PTR_ERR(falcon->firmware.vaddr);
        }
 
        /* copy firmware image into local area. this also ensures endianness */
@@ -197,11 +197,19 @@ void falcon_exit(struct falcon *falcon)
 int falcon_boot(struct falcon *falcon)
 {
        unsigned long offset;
+       u32 value;
        int err;
 
        if (!falcon->firmware.vaddr)
                return -EINVAL;
 
+       err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+                                (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+                                          FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+                                10, 10000);
+       if (err < 0)
+               return err;
+
        falcon_writel(falcon, 0, FALCON_DMACTL);
 
        /* setup the address of the binary data so Falcon can access it later */
index 6112d90429799582098c01a3282fc571aa665517..922a48d5a483fbc217197e09984d9d1c7c7c2ed1 100644 (file)
@@ -742,7 +742,9 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
 
 static int tegra_display_hub_probe(struct platform_device *pdev)
 {
+       struct device_node *child = NULL;
        struct tegra_display_hub *hub;
+       struct clk *clk;
        unsigned int i;
        int err;
 
@@ -801,6 +803,34 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
                        return err;
        }
 
+       hub->num_heads = of_get_child_count(pdev->dev.of_node);
+
+       hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
+                                     GFP_KERNEL);
+       if (!hub->clk_heads)
+               return -ENOMEM;
+
+       for (i = 0; i < hub->num_heads; i++) {
+               child = of_get_next_child(pdev->dev.of_node, child);
+               if (!child) {
+                       dev_err(&pdev->dev, "failed to find node for head %u\n",
+                               i);
+                       return -ENODEV;
+               }
+
+               clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
+               if (IS_ERR(clk)) {
+                       dev_err(&pdev->dev, "failed to get clock for head %u\n",
+                               i);
+                       of_node_put(child);
+                       return PTR_ERR(clk);
+               }
+
+               hub->clk_heads[i] = clk;
+       }
+
+       of_node_put(child);
+
        /* XXX: enable clock across reset? */
        err = reset_control_assert(hub->rst);
        if (err < 0)
@@ -840,12 +870,16 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
 static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
 {
        struct tegra_display_hub *hub = dev_get_drvdata(dev);
+       unsigned int i = hub->num_heads;
        int err;
 
        err = reset_control_assert(hub->rst);
        if (err < 0)
                return err;
 
+       while (i--)
+               clk_disable_unprepare(hub->clk_heads[i]);
+
        clk_disable_unprepare(hub->clk_hub);
        clk_disable_unprepare(hub->clk_dsc);
        clk_disable_unprepare(hub->clk_disp);
@@ -856,6 +890,7 @@ static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
 static int __maybe_unused tegra_display_hub_resume(struct device *dev)
 {
        struct tegra_display_hub *hub = dev_get_drvdata(dev);
+       unsigned int i;
        int err;
 
        err = clk_prepare_enable(hub->clk_disp);
@@ -870,13 +905,22 @@ static int __maybe_unused tegra_display_hub_resume(struct device *dev)
        if (err < 0)
                goto disable_dsc;
 
+       for (i = 0; i < hub->num_heads; i++) {
+               err = clk_prepare_enable(hub->clk_heads[i]);
+               if (err < 0)
+                       goto disable_heads;
+       }
+
        err = reset_control_deassert(hub->rst);
        if (err < 0)
-               goto disable_hub;
+               goto disable_heads;
 
        return 0;
 
-disable_hub:
+disable_heads:
+       while (i--)
+               clk_disable_unprepare(hub->clk_heads[i]);
+
        clk_disable_unprepare(hub->clk_hub);
 disable_dsc:
        clk_disable_unprepare(hub->clk_dsc);
index 6696a85fc1f204389f2973a6839e4eddca06a1d7..479087c0705a78a65acadc6c1bbe82cbe377d1ca 100644 (file)
@@ -49,6 +49,9 @@ struct tegra_display_hub {
        struct clk *clk_hub;
        struct reset_control *rst;
 
+       unsigned int num_heads;
+       struct clk **clk_heads;
+
        const struct tegra_display_hub_soc *soc;
        struct tegra_windowgroup *wgrps;
 };
index b129da2e5afd5ede520bb60e442cc18fb5624d46..ef8692b7075ab0f82262b0aae3cb6c57f160d160 100644 (file)
@@ -19,6 +19,8 @@
 
 #include <soc/tegra/pmc.h>
 
+#include <sound/hda_verbs.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_dp_helper.h>
 #include <drm/drm_panel.h>
 #include "sor.h"
 #include "trace.h"
 
-/*
- * XXX Remove this after the commit adding it to soc/tegra/pmc.h has been
- * merged. Having this around after the commit is merged should be safe since
- * the preprocessor will effectively replace all occurrences and therefore no
- * duplicate will be defined.
- */
-#define TEGRA_IO_PAD_HDMI_DP0 26
-
 #define SOR_REKEY 0x38
 
 struct tegra_sor_hdmi_settings {
@@ -407,6 +401,7 @@ struct tegra_sor {
        const struct tegra_sor_soc *soc;
        void __iomem *regs;
        unsigned int index;
+       unsigned int irq;
 
        struct reset_control *rst;
        struct clk *clk_parent;
@@ -433,6 +428,11 @@ struct tegra_sor {
 
        struct delayed_work scdc;
        bool scdc_enabled;
+
+       struct {
+               unsigned int sample_rate;
+               unsigned int channels;
+       } audio;
 };
 
 struct tegra_sor_state {
@@ -2139,6 +2139,144 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
        return 0;
 }
 
+static void tegra_sor_write_eld(struct tegra_sor *sor)
+{
+       size_t length = drm_eld_size(sor->output.connector.eld), i;
+
+       for (i = 0; i < length; i++)
+               tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
+                                SOR_AUDIO_HDA_ELD_BUFWR);
+
+       /*
+        * The HDA codec will always report an ELD buffer size of 96 bytes and
+        * the HDA codec driver will check that each byte read from the buffer
+        * is valid. Therefore every byte must be written, even if no 96 bytes
+        * were parsed from EDID.
+        */
+       for (i = length; i < 96; i++)
+               tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
+}
+
+static void tegra_sor_audio_prepare(struct tegra_sor *sor)
+{
+       u32 value;
+
+       tegra_sor_write_eld(sor);
+
+       value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
+       tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
+{
+       tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
+{
+       u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+       struct hdmi_audio_infoframe frame;
+       u32 value;
+       int err;
+
+       err = hdmi_audio_infoframe_init(&frame);
+       if (err < 0) {
+               dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
+               return err;
+       }
+
+       frame.channels = sor->audio.channels;
+
+       err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
+               return err;
+       }
+
+       tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+       value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+       value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+       value |= INFOFRAME_CTRL_ENABLE;
+       tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+
+       return 0;
+}
+
+static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
+{
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+       /* select HDA audio input */
+       value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+       value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+       /* inject null samples */
+       if (sor->audio.channels != 2)
+               value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+       else
+               value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+       value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+       tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+       /* enable advertising HBR capability */
+       tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+
+       tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
+
+       value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
+               SOR_HDMI_SPARE_CTS_RESET(1) |
+               SOR_HDMI_SPARE_HW_CTS_ENABLE;
+       tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
+
+       /* enable HW CTS */
+       value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
+       tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
+
+       /* allow packet to be sent */
+       value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
+       tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
+
+       /* reset N counter and enable lookup */
+       value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
+       tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+       value = (24000 * 4096) / (128 * sor->audio.sample_rate / 1000);
+       tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
+       tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
+
+       tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
+       tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
+
+       tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
+       tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
+
+       tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
+       tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
+
+       value = (24000 * 6144) / (128 * sor->audio.sample_rate / 1000);
+       tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
+       tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
+
+       value = (24000 * 12288) / (128 * sor->audio.sample_rate / 1000);
+       tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
+       tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
+
+       value = (24000 * 24576) / (128 * sor->audio.sample_rate / 1000);
+       tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
+       tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
+
+       value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
+       value &= ~SOR_HDMI_AUDIO_N_RESET;
+       tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+       tegra_sor_hdmi_enable_audio_infoframe(sor);
+}
+
 static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
 {
        u32 value;
@@ -2148,6 +2286,11 @@ static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
        tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
 }
 
+static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
+{
+       tegra_sor_hdmi_disable_audio_infoframe(sor);
+}
+
 static struct tegra_sor_hdmi_settings *
 tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
 {
@@ -2243,6 +2386,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
        u32 value;
        int err;
 
+       tegra_sor_audio_unprepare(sor);
        tegra_sor_hdmi_scdc_stop(sor);
 
        err = tegra_sor_detach(sor);
@@ -2651,6 +2795,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
                dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
 
        tegra_sor_hdmi_scdc_start(sor);
+       tegra_sor_audio_prepare(sor);
 }
 
 static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
@@ -2666,6 +2811,7 @@ static int tegra_sor_init(struct host1x_client *client)
        struct tegra_sor *sor = host1x_client_to_sor(client);
        int connector = DRM_MODE_CONNECTOR_Unknown;
        int encoder = DRM_MODE_ENCODER_NONE;
+       u32 value;
        int err;
 
        if (!sor->aux) {
@@ -2759,6 +2905,15 @@ static int tegra_sor_init(struct host1x_client *client)
        if (err < 0)
                return err;
 
+       /*
+        * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+        * is used for interoperability between the HDA codec driver and the
+        * HDMI/DP driver.
+        */
+       value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+       tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+       tegra_sor_writel(sor, value, SOR_INT_MASK);
+
        return 0;
 }
 
@@ -2767,6 +2922,9 @@ static int tegra_sor_exit(struct host1x_client *client)
        struct tegra_sor *sor = host1x_client_to_sor(client);
        int err;
 
+       tegra_sor_writel(sor, 0, SOR_INT_MASK);
+       tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+
        tegra_output_exit(&sor->output);
 
        if (sor->aux) {
@@ -3037,6 +3195,54 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
        return 0;
 }
 
+static void tegra_hda_parse_format(unsigned int format, unsigned int *rate,
+                                  unsigned int *channels)
+{
+       unsigned int mul, div;
+
+       if (format & AC_FMT_BASE_44K)
+               *rate = 44100;
+       else
+               *rate = 48000;
+
+       mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+       div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+       *rate = *rate * (mul + 1) / (div + 1);
+
+       *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_sor_irq(int irq, void *data)
+{
+       struct tegra_sor *sor = data;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_INT_STATUS);
+       tegra_sor_writel(sor, value, SOR_INT_STATUS);
+
+       if (value & SOR_INT_CODEC_SCRATCH0) {
+               value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+               if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+                       unsigned int format, sample_rate, channels;
+
+                       format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+                       tegra_hda_parse_format(format, &sample_rate, &channels);
+
+                       sor->audio.sample_rate = sample_rate;
+                       sor->audio.channels = channels;
+
+                       tegra_sor_hdmi_audio_enable(sor);
+               } else {
+                       tegra_sor_hdmi_audio_disable(sor);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
 static int tegra_sor_probe(struct platform_device *pdev)
 {
        struct device_node *np;
@@ -3119,14 +3325,38 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
-       if (!pdev->dev.pm_domain) {
-               sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-               if (IS_ERR(sor->rst)) {
-                       err = PTR_ERR(sor->rst);
+       err = platform_get_irq(pdev, 0);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+               goto remove;
+       }
+
+       sor->irq = err;
+
+       err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
+                              dev_name(sor->dev), sor);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+               goto remove;
+       }
+
+       sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+       if (IS_ERR(sor->rst)) {
+               err = PTR_ERR(sor->rst);
+
+               if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
                        dev_err(&pdev->dev, "failed to get reset control: %d\n",
                                err);
                        goto remove;
                }
+
+               /*
+                * At this point, the reset control is most likely being used
+                * by the generic power domain implementation. With any luck
+                * the power domain will have taken care of resetting the SOR
+                * and we don't have to do anything.
+                */
+               sor->rst = NULL;
        }
 
        sor->clk = devm_clk_get(&pdev->dev, NULL);
index fb0854d92a2790f6390bb5ec6fd79dbacea114e6..13f7e68bec42f35527f7b5a5576e3a74074ed367 100644 (file)
 #define  INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
 #define  INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
 
+#define SOR_HDMI_ACR_CTRL 0xb1
+
+#define SOR_HDMI_ACR_0320_SUBPACK_LOW 0xb2
+#define  SOR_HDMI_ACR_SUBPACK_LOW_SB1(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_ACR_0320_SUBPACK_HIGH 0xb3
+#define  SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE (1 << 31)
+
+#define SOR_HDMI_ACR_0441_SUBPACK_LOW 0xb4
+#define SOR_HDMI_ACR_0441_SUBPACK_HIGH 0xb5
+
 #define SOR_HDMI_CTRL 0xc0
 #define  SOR_HDMI_CTRL_ENABLE (1 << 30)
 #define  SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
 #define  SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
 #define  SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
 
+#define SOR_HDMI_SPARE 0xcb
+#define  SOR_HDMI_SPARE_ACR_PRIORITY_HIGH (1 << 31)
+#define  SOR_HDMI_SPARE_CTS_RESET(x) (((x) & 0x7) << 16)
+#define  SOR_HDMI_SPARE_HW_CTS_ENABLE (1 << 0)
+
 #define SOR_REFCLK 0xe6
 #define  SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
 #define  SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
 #define  SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
 #define  SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
 
+#define SOR_AUDIO_CNTRL 0xfc
+#define  SOR_AUDIO_CNTRL_INJECT_NULLSMPL (1 << 29)
+#define  SOR_AUDIO_CNTRL_SOURCE_SELECT(x) (((x) & 0x3) << 20)
+#define   SOURCE_SELECT_MASK 0x3
+#define   SOURCE_SELECT_HDA 0x2
+#define   SOURCE_SELECT_SPDIF 0x1
+#define   SOURCE_SELECT_AUTO 0x0
+#define  SOR_AUDIO_CNTRL_AFIFO_FLUSH (1 << 12)
+
+#define SOR_AUDIO_SPARE 0xfe
+#define  SOR_AUDIO_SPARE_HBR_ENABLE (1 << 27)
+
+#define SOR_AUDIO_NVAL_0320 0xff
+#define SOR_AUDIO_NVAL_0441 0x100
+#define SOR_AUDIO_NVAL_0882 0x101
+#define SOR_AUDIO_NVAL_1764 0x102
+#define SOR_AUDIO_NVAL_0480 0x103
+#define SOR_AUDIO_NVAL_0960 0x104
+#define SOR_AUDIO_NVAL_1920 0x105
+
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0 0x10a
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+
+#define SOR_AUDIO_HDA_ELD_BUFWR 0x10c
+#define  SOR_AUDIO_HDA_ELD_BUFWR_INDEX(x) (((x) & 0xff) << 8)
+#define  SOR_AUDIO_HDA_ELD_BUFWR_DATA(x) (((x) & 0xff) << 0)
+
+#define SOR_AUDIO_HDA_PRESENSE 0x10d
+#define  SOR_AUDIO_HDA_PRESENSE_ELDV (1 << 1)
+#define  SOR_AUDIO_HDA_PRESENSE_PD (1 << 0)
+
+#define SOR_AUDIO_AVAL_0320 0x10f
+#define SOR_AUDIO_AVAL_0441 0x110
+#define SOR_AUDIO_AVAL_0882 0x111
+#define SOR_AUDIO_AVAL_1764 0x112
+#define SOR_AUDIO_AVAL_0480 0x113
+#define SOR_AUDIO_AVAL_0960 0x114
+#define SOR_AUDIO_AVAL_1920 0x115
+
+#define SOR_INT_STATUS 0x11c
+#define  SOR_INT_CODEC_CP_REQUEST (1 << 2)
+#define  SOR_INT_CODEC_SCRATCH1 (1 << 1)
+#define  SOR_INT_CODEC_SCRATCH0 (1 << 0)
+
+#define SOR_INT_MASK 0x11d
+#define SOR_INT_ENABLE 0x11e
+
 #define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
 #define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
 #define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
 
+#define SOR_HDMI_AUDIO_N 0x13c
+#define SOR_HDMI_AUDIO_N_LOOKUP (1 << 28)
+#define SOR_HDMI_AUDIO_N_RESET (1 << 20)
+
 #define SOR_HDMI2_CTRL 0x13e
 #define  SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
 #define  SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
index 9f657a63b0bb4a59943c436f3fece4efc460b074..d47983deb1cff6ecd5fee2be06b59091c16115d6 100644 (file)
@@ -38,6 +38,7 @@ struct vic {
        struct iommu_domain *domain;
        struct device *dev;
        struct clk *clk;
+       struct reset_control *rst;
 
        /* Platform configuration */
        const struct vic_config *config;
@@ -56,13 +57,37 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
 static int vic_runtime_resume(struct device *dev)
 {
        struct vic *vic = dev_get_drvdata(dev);
+       int err;
+
+       err = clk_prepare_enable(vic->clk);
+       if (err < 0)
+               return err;
+
+       usleep_range(10, 20);
+
+       err = reset_control_deassert(vic->rst);
+       if (err < 0)
+               goto disable;
+
+       usleep_range(10, 20);
+
+       return 0;
 
-       return clk_prepare_enable(vic->clk);
+disable:
+       clk_disable_unprepare(vic->clk);
+       return err;
 }
 
 static int vic_runtime_suspend(struct device *dev)
 {
        struct vic *vic = dev_get_drvdata(dev);
+       int err;
+
+       err = reset_control_assert(vic->rst);
+       if (err < 0)
+               return err;
+
+       usleep_range(2000, 4000);
 
        clk_disable_unprepare(vic->clk);
 
@@ -282,10 +307,18 @@ static const struct vic_config vic_t186_config = {
        .version = 0x18,
 };
 
+#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
+
+static const struct vic_config vic_t194_config = {
+       .firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
+       .version = 0x19,
+};
+
 static const struct of_device_id vic_match[] = {
        { .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
        { .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
        { .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
+       { .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
        { },
 };
 
@@ -323,6 +356,14 @@ static int vic_probe(struct platform_device *pdev)
                return PTR_ERR(vic->clk);
        }
 
+       if (!dev->pm_domain) {
+               vic->rst = devm_reset_control_get(dev, "vic");
+               if (IS_ERR(vic->rst)) {
+                       dev_err(&pdev->dev, "failed to get reset\n");
+                       return PTR_ERR(vic->rst);
+               }
+       }
+
        vic->falcon.dev = dev;
        vic->falcon.regs = vic->regs;
        vic->falcon.ops = &vic_falcon_ops;
@@ -418,3 +459,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
 MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
 #endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
+#endif
index efa005a1c1b79d3e7faa36f23a09419c95cf53a8..93860346c42600b455a3f4db1394cc43cc74ae11 100644 (file)
@@ -126,10 +126,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                }
 
                if (!ret) {
-                       if (!entry->shared)
+                       if (!entry->num_shared)
                                continue;
 
-                       ret = reservation_object_reserve_shared(bo->resv, 1);
+                       ret = reservation_object_reserve_shared(bo->resv,
+                                                               entry->num_shared);
                        if (!ret)
                                continue;
                }
@@ -150,8 +151,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                        }
                }
 
-               if (!ret && entry->shared)
-                       ret = reservation_object_reserve_shared(bo->resv, 1);
+               if (!ret && entry->num_shared)
+                       ret = reservation_object_reserve_shared(bo->resv,
+                                                               entry->num_shared);
 
                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
@@ -199,7 +201,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
-               if (entry->shared)
+               if (entry->num_shared)
                        reservation_object_add_shared_fence(bo->resv, fence);
                else
                        reservation_object_add_excl_fence(bo->resv, fence);
index 8a029bade32a8561e0cf12e86d1e6eb28a69b002..3025bfc001a1d2302fb902b5c38b08b88f330e15 100644 (file)
@@ -85,7 +85,7 @@ static void vmw_resource_release(struct kref *kref)
                        struct ttm_validate_buffer val_buf;
 
                        val_buf.bo = bo;
-                       val_buf.shared = false;
+                       val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
@@ -462,7 +462,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 
        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
-       val_buf->shared = false;
+       val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
@@ -565,7 +565,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
        BUG_ON(!func->may_evict);
 
        val_buf.bo = NULL;
-       val_buf.shared = false;
+       val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;
@@ -614,7 +614,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
                return 0;
 
        val_buf.bo = NULL;
-       val_buf.shared = false;
+       val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
@@ -685,7 +685,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
        struct vmw_resource *res, *next;
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
-               .shared = false
+               .num_shared = 0
        };
 
        lockdep_assert_held(&vbo->base.resv->lock.base);
index 184025fa938e78fd0372d255b1db552bdb95a88d..fef22753f4de61d30ea4476aff91c8d776842a9b 100644 (file)
@@ -266,7 +266,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
                val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
                if (!val_buf->bo)
                        return -ESRCH;
-               val_buf->shared = false;
+               val_buf->num_shared = 0;
                list_add_tail(&val_buf->head, &ctx->bo_list);
                bo_node->as_mob = as_mob;
                bo_node->cpu_blit = cpu_blit;
index b92016ce09b77c442d8e19f7bedba305456fe187..096017b8789d257037664273b09026b0c2463bd5 100644 (file)
@@ -13,6 +13,7 @@ host1x-y = \
        hw/host1x02.o \
        hw/host1x04.o \
        hw/host1x05.o \
-       hw/host1x06.o
+       hw/host1x06.o \
+       hw/host1x07.o
 
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
index de6bc4e7fa23960705ccd55f5c52f34a84979444..419d8929a98f8f7ca2abf13d00ab3b8f3373f6b7 100644 (file)
@@ -44,6 +44,7 @@
 #include "hw/host1x04.h"
 #include "hw/host1x05.h"
 #include "hw/host1x06.h"
+#include "hw/host1x07.h"
 
 void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -130,7 +131,19 @@ static const struct host1x_info host1x06_info = {
        .has_hypervisor = true,
 };
 
+static const struct host1x_info host1x07_info = {
+       .nb_channels = 63,
+       .nb_pts = 704,
+       .nb_mlocks = 32,
+       .nb_bases = 0,
+       .init = host1x07_init,
+       .sync_offset = 0x0,
+       .dma_mask = DMA_BIT_MASK(40),
+       .has_hypervisor = true,
+};
+
 static const struct of_device_id host1x_of_match[] = {
+       { .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
        { .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
        { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
        { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
index d188f9068b9109c707d7ec6b5d9f826e6e1fe11d..95ea81172a83460d95139428d64f980f460c3263 100644 (file)
@@ -26,7 +26,6 @@
 #include "../intr.h"
 #include "../job.h"
 
-#define HOST1X_CHANNEL_SIZE 16384
 #define TRACE_MAX_LENGTH 128U
 
 static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
@@ -203,7 +202,11 @@ static void enable_gather_filter(struct host1x *host,
 static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
                               unsigned int index)
 {
-       ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+#if HOST1X_HW < 6
+       ch->regs = dev->regs + index * 0x4000;
+#else
+       ch->regs = dev->regs + index * 0x100;
+#endif
        enable_gather_filter(dev, ch);
        return 0;
 }
index b503c740c022dae1cb6d9d2bacb79cd9a045e393..8b749516c0518622f3c8549fec1a07fad5ce0c51 100644 (file)
@@ -62,9 +62,12 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
                                           struct host1x_channel *ch,
                                           struct output *o)
 {
-       u32 val, rd_ptr, wr_ptr, start, end;
+#if HOST1X_HW <= 6
+       u32 rd_ptr, wr_ptr, start, end;
        u32 payload = INVALID_PAYLOAD;
        unsigned int data_count = 0;
+#endif
+       u32 val;
 
        host1x_debug_output(o, "%u: fifo:\n", ch->id);
 
@@ -78,6 +81,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
        val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA);
        host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val);
 
+#if HOST1X_HW <= 6
        /* Peek pointer values are invalid during SLCG, so disable it */
        host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE);
 
@@ -127,6 +131,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
 
        host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL);
        host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE);
+#endif
 }
 
 static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
diff --git a/drivers/gpu/host1x/hw/host1x07.c b/drivers/gpu/host1x/hw/host1x07.c
new file mode 100644 (file)
index 0000000..04b779a
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x07.h"
+#include "host1x07_hardware.h"
+
+/* include code */
+#define HOST1X_HW 7
+
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x07_init(struct host1x *host)
+{
+       host->channel_op = &host1x_channel_ops;
+       host->cdma_op = &host1x_cdma_ops;
+       host->cdma_pb_op = &host1x_pushbuffer_ops;
+       host->syncpt_op = &host1x_syncpt_ops;
+       host->intr_op = &host1x_intr_ops;
+       host->debug_op = &host1x_debug_ops;
+
+       return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x07.h b/drivers/gpu/host1x/hw/host1x07.h
new file mode 100644 (file)
index 0000000..57b19f3
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X07_H
+#define HOST1X_HOST1X07_H
+
+struct host1x;
+
+int host1x07_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x07_hardware.h b/drivers/gpu/host1x/hw/host1x07_hardware.h
new file mode 100644 (file)
index 0000000..1353e7a
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra194
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X07_HARDWARE_H
+#define __HOST1X_HOST1X07_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x07_uclass.h"
+#include "hw_host1x07_vm.h"
+#include "hw_host1x07_hypervisor.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_wait_syncpt_indx_f(indx)
+               | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+       unsigned indx, unsigned threshold)
+{
+       return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+               | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+       unsigned indx, unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_wait_syncpt_base_indx_f(indx)
+               | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+       unsigned base_indx, unsigned offset)
+{
+       return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+               | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+       unsigned cond, unsigned indx)
+{
+       return host1x_uclass_incr_syncpt_cond_f(cond)
+               | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indbe_f(0xf)
+               | host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset);
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+       unsigned mod_id, unsigned offset, bool auto_inc)
+{
+       u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+               | host1x_uclass_indoff_indroffset_f(offset)
+               | host1x_uclass_indoff_rwn_read_v();
+       if (auto_inc)
+               v |= host1x_uclass_indoff_autoinc_f(1);
+       return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+       unsigned class_id, unsigned offset, unsigned mask)
+{
+       return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+       return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+       return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+       return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+       return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+       return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+               host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+       return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+       return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+       return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
index 4457486c72b05e0dd5f2b3ddade15276e5fa98c7..e599e15bf999aa44a70d0c796956739b02adc126 100644 (file)
@@ -59,7 +59,7 @@ static inline u32 host1x_uclass_incr_syncpt_r(void)
        host1x_uclass_incr_syncpt_r()
 static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
 {
-       return (v & 0xff) << 8;
+       return (v & 0xff) << 10;
 }
 #define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
        host1x_uclass_incr_syncpt_cond_f(v)
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
new file mode 100644 (file)
index 0000000..2b99d68
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_HV_SYNCPT_PROT_EN                       0x1ac4
+#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN                 BIT(1)
+#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x)          (0x2020 + ((x) * 4))
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL                    0x233c
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x)            (x)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x)         ((x) << 16)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE             BIT(31)
+#define HOST1X_HV_CMDFIFO_PEEK_READ                    0x2340
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS                    0x2344
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x)                (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x)                ((x) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP(x)                     (0x2588 + ((x) * 4))
+#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x)             (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x)              ((x) & 0xfff)
+#define HOST1X_HV_ICG_EN_OVERRIDE                      0x2aa8
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
new file mode 100644 (file)
index 0000000..7e4e3b3
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X07_UCLASS_H
+#define HOST1X_HW_HOST1X07_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+       host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+       return (v & 0xff) << 10;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+       host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+       host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+       return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+       host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+       host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+       host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+       return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+       host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+       return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+       host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+       host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+       return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+       host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+       return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+       host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+       return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+       host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+       return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+       host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+       return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+       host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+       return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+       host1x_uclass_indoff_rwn_read_v()
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
new file mode 100644 (file)
index 0000000..7e4629e
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_CHANNEL_DMASTART                                0x0000
+#define HOST1X_CHANNEL_DMASTART_HI                     0x0004
+#define HOST1X_CHANNEL_DMAPUT                          0x0008
+#define HOST1X_CHANNEL_DMAPUT_HI                       0x000c
+#define HOST1X_CHANNEL_DMAGET                          0x0010
+#define HOST1X_CHANNEL_DMAGET_HI                       0x0014
+#define HOST1X_CHANNEL_DMAEND                          0x0018
+#define HOST1X_CHANNEL_DMAEND_HI                       0x001c
+#define HOST1X_CHANNEL_DMACTRL                         0x0020
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP                 BIT(0)
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST               BIT(1)
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET              BIT(2)
+#define HOST1X_CHANNEL_CMDFIFO_STAT                    0x0024
+#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY              BIT(13)
+#define HOST1X_CHANNEL_CMDFIFO_RDATA                   0x0028
+#define HOST1X_CHANNEL_CMDP_OFFSET                     0x0030
+#define HOST1X_CHANNEL_CMDP_CLASS                      0x0034
+#define HOST1X_CHANNEL_CHANNELSTAT                     0x0038
+#define HOST1X_CHANNEL_CMDPROC_STOP                    0x0048
+#define HOST1X_CHANNEL_TEARDOWN                                0x004c
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(x)                 (0x6400 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x)   (0x6464 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x)   (0x652c + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x)       (0x6590 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT(x)                          (0x8080 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x)               (0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP(x)                   (0xa604 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v)                        (((v) & 0x3f) << 8)
index a23bb3352d029303a4c3a750532a5e3a73053c0a..d946660d47f8325e7fd76bf0a88b50eeebd8b95f 100644 (file)
@@ -37,10 +37,12 @@ static void syncpt_restore(struct host1x_syncpt *sp)
  */
 static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
 {
+#if HOST1X_HW < 7
        struct host1x *host = sp->host;
 
        host1x_sync_writel(host, sp->base_val,
                           HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
 }
 
 /*
@@ -48,10 +50,12 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
  */
 static void syncpt_read_wait_base(struct host1x_syncpt *sp)
 {
+#if HOST1X_HW < 7
        struct host1x *host = sp->host;
 
        sp->base_val =
                host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
 }
 
 /*
index 926379d53484ace11b5e05be242a3c53a296574a..47e19796c45087f76664b161d5e1a9b654ed9f2b 100644 (file)
@@ -331,4 +331,8 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+                               unsigned long remaining);
+
 #endif
index b0fdd1980034c592c5fdcb64cd05d5ee29df91d8..621615fa7728f1553afd7167f5398ae41a892a37 100644 (file)
  *
  * @head:           list head for thread-private list.
  * @bo:             refcounted buffer object pointer.
- * @shared:         should the fence be added shared?
+ * @num_shared:     How many shared fences we want to add.
  */
 
 struct ttm_validate_buffer {
        struct list_head head;
        struct ttm_buffer_object *bo;
-       bool shared;
+       unsigned int num_shared;
 };
 
 /**
index c06d0a5bdd808235850c2d3ac54c2eb07ef561b1..91a16b333c69005417b9314d192b0db0eac43b19 100644 (file)
@@ -105,14 +105,24 @@ struct drm_msm_gem_new {
        __u32 handle;         /* out */
 };
 
-#define MSM_INFO_IOVA  0x01
-
-#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+/* Get or set GEM buffer info.  The requested value can be passed
+ * directly in 'value', or for data larger than 64b 'value' is a
+ * pointer to userspace buffer, with 'len' specifying the number of
+ * bytes copied into that buffer.  For info returned by pointer,
+ * calling the GEM_INFO ioctl with null 'value' will return the
+ * required buffer size in 'len'
+ */
+#define MSM_INFO_GET_OFFSET    0x00   /* get mmap() offset, returned by value */
+#define MSM_INFO_GET_IOVA      0x01   /* get iova, returned by value */
+#define MSM_INFO_SET_NAME      0x02   /* set the debug name (by pointer) */
+#define MSM_INFO_GET_NAME      0x03   /* get debug name, returned by pointer */
 
 struct drm_msm_gem_info {
        __u32 handle;         /* in */
-       __u32 flags;          /* in - combination of MSM_INFO_* flags */
-       __u64 offset;         /* out, mmap() offset or iova */
+       __u32 info;           /* in - one of MSM_INFO_* */
+       __u64 value;          /* in or out */
+       __u32 len;            /* in or out */
+       __u32 pad;
 };
 
 #define MSM_PREP_READ        0x01
@@ -188,8 +198,11 @@ struct drm_msm_gem_submit_cmd {
  */
 #define MSM_SUBMIT_BO_READ             0x0001
 #define MSM_SUBMIT_BO_WRITE            0x0002
+#define MSM_SUBMIT_BO_DUMP             0x0004
 
-#define MSM_SUBMIT_BO_FLAGS            (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+#define MSM_SUBMIT_BO_FLAGS            (MSM_SUBMIT_BO_READ | \
+                                       MSM_SUBMIT_BO_WRITE | \
+                                       MSM_SUBMIT_BO_DUMP)
 
 struct drm_msm_gem_submit_bo {
        __u32 flags;          /* in, mask of MSM_SUBMIT_BO_x */
index b01eb502d49c55d04f33cace28a410171239eaf5..e622fd1fbd46399c967e448c762b69b8dd3220b2 100644 (file)
@@ -398,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u32 n_success;                /* to/from KFD */
 };
 
+struct kfd_ioctl_get_dmabuf_info_args {
+       __u64 size;             /* from KFD */
+       __u64 metadata_ptr;     /* to KFD */
+       __u32 metadata_size;    /* to KFD (space allocated by user)
+                                * from KFD (actual metadata size)
+                                */
+       __u32 gpu_id;   /* from KFD */
+       __u32 flags;            /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
+       __u32 dmabuf_fd;        /* to KFD */
+};
+
+struct kfd_ioctl_import_dmabuf_args {
+       __u64 va_addr;  /* to KFD */
+       __u64 handle;   /* from KFD */
+       __u32 gpu_id;   /* to KFD */
+       __u32 dmabuf_fd;        /* to KFD */
+};
+
 #define AMDKFD_IOCTL_BASE 'K'
 #define AMDKFD_IO(nr)                  _IO(AMDKFD_IOCTL_BASE, nr)
 #define AMDKFD_IOR(nr, type)           _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -486,7 +504,13 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
 #define AMDKFD_IOC_GET_QUEUE_WAVE_STATE                \
                AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
 
+#define AMDKFD_IOC_GET_DMABUF_INFO             \
+               AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+#define AMDKFD_IOC_IMPORT_DMABUF               \
+               AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
 #define AMDKFD_COMMAND_START           0x01
-#define AMDKFD_COMMAND_END             0x1C
+#define AMDKFD_COMMAND_END             0x1E
 
 #endif
index d8fc96ed11e9d29ecde7654419d1554aaeb7ab16..4ba5efe8d086a876285399b68051df0ce87fc368 100644 (file)
 #define WINCONx_BURSTLEN_8WORD                 (0x1 << 9)
 #define WINCONx_BURSTLEN_4WORD                 (0x2 << 9)
 #define WINCONx_ENWIN                          (1 << 0)
+#define WINCONx_BLEND_MODE_MASK                        (0xc2)
 
 #define WINCON0_BPPMODE_MASK                   (0xf << 2)
 #define WINCON0_BPPMODE_SHIFT                  2
 #define WINCON0_BPPMODE_24BPP_888              (0xb << 2)
 
 #define WINCON1_LOCALSEL_CAMIF                 (1 << 23)
+#define WINCON1_ALPHA_MUL                      (1 << 7)
 #define WINCON1_BLD_PIX                                (1 << 6)
 #define WINCON1_BPPMODE_MASK                   (0xf << 2)
 #define WINCON1_BPPMODE_SHIFT                  2
 #define WPALCON_W0PAL_16BPP_565                        (0x6 << 0)
 
 /* Blending equation control */
+#define BLENDEQx(_win)                         (0x244 + ((_win - 1) * 4))
+#define BLENDEQ_ZERO                           0x0
+#define BLENDEQ_ONE                            0x1
+#define BLENDEQ_ALPHA_A                                0x2
+#define BLENDEQ_ONE_MINUS_ALPHA_A              0x3
+#define BLENDEQ_ALPHA0                         0x6
+#define BLENDEQ_B_FUNC_F(_x)                   (_x << 6)
+#define BLENDEQ_A_FUNC_F(_x)                   (_x << 0)
 #define BLENDCON                               0x260
 #define BLENDCON_NEW_MASK                      (1 << 0)
 #define BLENDCON_NEW_8BIT_ALPHA_VALUE          (1 << 0)